{
  "count": 1123,
  "items": [
    {
      "id": "b-acoustic-metamaterials-phononic-band-gaps",
      "title": "Phononic crystals exhibit acoustic band gaps analogous to electronic band gaps in semiconductors, enabling acoustic metamaterials that control sound propagation through the same mathematical framework as photonic crystals and electronic band theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The acoustic wave equation in a periodic medium maps onto Bloch's theorem and band theory: phononic crystals (periodic elastic structures) develop band gaps where sound propagation is forbidden, analogous to electronic band gaps in semiconductors; the phonon dispersion relation omega(k) in a phononi",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-acoustic-topological-insulator-surface-states"
      ],
      "communication_gap": "Acoustical engineers designing noise control systems and condensed matter physicists studying electron band structure use identical mathematics but rarely collaborate; phononic crystals emerged as a field in the 1990s and the topological acoustics direction (importing topological insulator concepts into acoustics) is still maturing.\n",
      "translation_table": [
        {
          "field_a_term": "acoustic band gap (acoustics)",
          "field_b_term": "electronic band gap in semiconductor (condensed matter)",
          "note": "Both arise from Bragg reflection of waves in a periodic medium; forbidden frequency/energy range"
        },
        {
          "field_a_term": "phononic crystal unit cell (acoustics)",
          "field_b_term": "crystal primitive unit cell and Brillouin zone (condensed matter)",
          "note": "The acoustic periodicity defines a Brillouin zone for phonon wavevectors"
        },
        {
          "field_a_term": "acoustic impedance contrast Z_2/Z_1 (acoustics)",
          "field_b_term": "potential well depth / band gap magnitude (condensed matter)",
          "note": "Larger impedance mismatch opens wider band gaps, analogous to deeper potential wells"
        },
        {
          "field_a_term": "locally resonant acoustic metamaterial (acoustics)",
          "field_b_term": "tight-binding model / flat band (condensed matter)",
          "note": "Local resonators create sub-wavelength band gaps via hybridization, analogous to flat bands"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.71.2022",
          "note": "Kushwaha et al. (1993) - acoustic band structure in periodic elastic composites"
        },
        {
          "doi": "10.1126/science.289.5485.1734",
          "note": "Liu et al. (2000) - locally resonant sonic materials with sub-wavelength band gaps"
        },
        {
          "doi": "10.1038/nphys3458",
          "note": "Yang et al. (2015) - topological acoustics using Dirac-cone-based metamaterials"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/acoustics-condensed-matter/b-acoustic-metamaterials-phononic-band-gaps.yaml"
    },
    {
      "id": "b-phononic-crystals-acoustic-band-gap-bragg",
      "title": "Phononic crystals - periodic elastic composites - open complete acoustic band gaps through Bragg scattering (wavelength ~ period) and local resonance mechanisms, making solid-state photonic crystal theory directly transferable to acoustic wave control and enabling acoustic metamaterials that break the mass-density law.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Phononic crystals are periodic arrays of inclusions (steel spheres in epoxy, air holes in solid) with periodicity a. When acoustic wavelength lambda ~ 2a (Bragg condition), destructive interference opens a band gap - a frequency range of forbidden propagation. This is the acoustic analogue of photon",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Acoustical engineers and materials scientists attend separate conferences (ASA vs. MRS) despite studying the same Bragg scattering physics. Electronic band structure methods (plane-wave expansion, k.p theory) are standard in materials science but rarely used by acoustics engineers who design noise barriers empirically.\n",
      "translation_table": [
        {
          "field_a_term": "Bragg scattering condition k = pi/a (materials science)",
          "field_b_term": "frequency band gap opening at Brillouin zone boundary (acoustics)",
          "note": "Band gap opens when acoustic k = pi/a; gap width proportional to impedance contrast"
        },
        {
          "field_a_term": "phononic band structure omega(k) (materials science)",
          "field_b_term": "allowed and forbidden frequency ranges for acoustic propagation (acoustics)",
          "note": "Computed by plane-wave expansion; analogous to electronic band structure in semiconductors"
        },
        {
          "field_a_term": "effective medium theory / homogenisation (materials science)",
          "field_b_term": "sub-wavelength acoustic metamaterial with negative mass density (acoustics)",
          "note": "Near local resonance frequency, effective density becomes negative - acoustic cloaking"
        },
        {
          "field_a_term": "topological band gap (materials science)",
          "field_b_term": "topologically protected acoustic edge states in phononic crystal (acoustics)",
          "note": "Acoustic analogue of topological insulators; edge modes immune to defect scattering"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.289.5485.1734",
          "note": "Liu et al. (2000) - locally resonant sonic materials; sub-wavelength band gaps"
        },
        {
          "doi": "10.1103/PhysRevLett.71.2022",
          "note": "Kushwaha et al. (1993) - acoustic band structure of periodic elastic composites; plane-wave method"
        },
        {
          "doi": "10.1038/nphys2522",
          "note": "Huber (2016) - topological mechanics; acoustic topological states in phononic crystals"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/acoustics-materials-science/b-phononic-crystals-acoustic-band-gap-bragg.yaml"
    },
    {
      "id": "b-grokking-criticality",
      "title": "The \"grokking\" generalisation transition in deep learning is a second-order phase transition governed by the same universality classes that describe magnetisation, percolation, and neural avalanches in physical systems.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Grokking — the phenomenon where a neural network suddenly transitions from memorisation to generalisation after a long plateau — exhibits sharp, non-analytic changes in the effective dimensionality of learned representations, with scaling behaviour consistent with a second-order phase transition (ar",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-grokking-criticality-universality"
      ],
      "communication_gap": "Machine learning researchers rarely read Physical Review Letters or the Journal of Statistical Physics; condensed-matter physicists rarely read NeurIPS or ICML. The vocabulary mismatch is severe: \"grokking\" is an ML neologism that physicists have not encountered; \"universality class\" and \"renormalisation group\" are physics terms that ML researchers may have heard of but do not use operationally. Grant panels for ML research do not include statistical physicists; physics panels do not include deep learning practitioners. The computational cost of training large models has created a perception barrier — physicists assume they cannot afford ML experiments — even though the relevant scaling phenomena occur in small, cheap models.\n",
      "translation_table": [
        {
          "field_a_term": "order parameter (e.g. magnetisation)",
          "field_b_term": "generalisation gap (train accuracy minus test accuracy)",
          "note": "The quantity that is zero in one phase (generalising) and non-zero in the other (memorising)"
        },
        {
          "field_a_term": "critical temperature T_c",
          "field_b_term": "grokking training step t_c",
          "note": "The control parameter value at which the phase transition occurs"
        },
        {
          "field_a_term": "universality class",
          "field_b_term": "architecture-independent generalisation exponents",
          "note": "If grokking is universal, exponents should be the same for MLPs and Transformers — as arXiv 2604.16431 suggests"
        },
        {
          "field_a_term": "renormalisation group flow",
          "field_b_term": "weight matrix spectral evolution during training",
          "note": "The progressive coarse-graining of the learned representation toward a fixed point"
        },
        {
          "field_a_term": "finite-size scaling",
          "field_b_term": "scaling of grokking step t_c with model size N and dataset size D",
          "note": "Predicts how t_c scales as N, D → ∞ — enables extrapolation without training giant models"
        },
        {
          "field_a_term": "correlation length ξ → ∞",
          "field_b_term": "long-range feature correlations at grokking",
          "note": "At the transition, the representation becomes globally coherent — a sudden increase in effective representation range"
        },
        {
          "field_a_term": "spontaneous symmetry breaking",
          "field_b_term": "selection of a generalising solution from the symmetry-degenerate memorising manifold",
          "note": "Grokking selects one generalising solution from many equivalent memorising ones"
        }
      ],
      "references": [
        {
          "arxiv": "2604.16431",
          "note": "Dimensional Criticality at Grokking Across MLPs and Transformers — seeding paper"
        },
        {
          "doi": "10.48550/arXiv.2301.05217",
          "note": "Power et al. 2022 — original grokking paper"
        },
        {
          "doi": "10.1103/PhysRevLett.85.4626",
          "note": "Callaway et al. — network percolation (same universality class candidate)"
        }
      ],
      "last_reviewed": "2026-05-04",
      "file": "cross-domain/ai-physics/b-grokking-criticality.yaml"
    },
    {
      "id": "b-openalex-renormalization-group-deep-learning",
      "title": "Deep residual networks implement a discrete renormalization group flow, where each residual block performs a coarse-graining step that preserves the relevant features while discarding irrelevant fine-grained details — the same operation that defines a renormalization group transformation in statistical physics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The renormalization group (RG) in statistical physics is a systematic procedure for integrating out short-scale degrees of freedom while preserving long-wavelength behavior, flowing toward fixed points that determine universality classes. Deep residual networks (He et al. 2016, 219 k citations) impl",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-renormalization-group-deep-learning-criticality"
      ],
      "communication_gap": "Statistical physicists rarely attend NeurIPS; ML engineers rarely read Physical Review Letters. The vocabulary gap (coarse-graining vs. downsampling, fixed point vs. convergence, universality vs. architecture independence) masks the structural identity. Both communities have solved parts of the same puzzle in different notation.\n",
      "translation_table": [
        {
          "field_a_term": "coarse-graining step",
          "field_b_term": "residual block F(x) + x",
          "note": "Each block integrates out fine-grained detail, retaining the relevant field configuration"
        },
        {
          "field_a_term": "relevant operators",
          "field_b_term": "high-variance feature directions (top singular vectors)",
          "note": "Both survive the flow toward the fixed point; irrelevant operators decay"
        },
        {
          "field_a_term": "RG fixed point",
          "field_b_term": "converged deep representation (penultimate layer activations)",
          "note": "The attractor of repeated coarse-graining — or repeated residual refinement"
        },
        {
          "field_a_term": "universality class",
          "field_b_term": "architecture-independent generalisation behavior",
          "note": "Networks of very different widths and depths that reach the same fixed point have the same test error scaling"
        },
        {
          "field_a_term": "critical temperature T_c",
          "field_b_term": "optimal learning rate at the edge of chaos",
          "note": "Operating near T_c / edge of chaos maximises both phase transition sensitivity and representation capacity"
        }
      ],
      "references": [
        {
          "doi": "10.1109/cvpr.2016.90",
          "note": "He et al. (2016) Deep Residual Learning for Image Recognition — 219,498 citations; primary reference"
        },
        {
          "doi": "10.48550/arxiv.1410.3831",
          "note": "Mehta & Schwab (2014) — exact mapping between variational RG and deep learning"
        },
        {
          "doi": "10.1103/PhysRevLett.121.260601",
          "note": "Koch-Janusz & Ringel (2018) — mutual information and RG in neural network architectures"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ai-physics/b-openalex-renormalization-group-deep-learning.yaml"
    },
    {
      "id": "b-complex-systems-emergence",
      "title": "Emergence — the appearance of macro-level properties not predictable from micro-level rules without full simulation — is the unifying concept across all scientific domains: consciousness from neurons, wetness from H₂O, markets from trades, and ant colonies from individual ant behaviour, formalised by renormalization group theory (why coarse-graining yields qualitatively new laws) and Tononi's Integrated Information Theory (Φ as a quantitative measure).\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Anderson's \"More is Different\" (1972): each level of organisation obeys its own laws not derivable from — though consistent with — lower levels. Formal definition of emergence (Bedau 1997): a system S exhibits emergence if S has properties P such that (a) individual components lack P, (b) P cannot b",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-renormalization-group-universal-emergence-laws-cross-domain"
      ],
      "communication_gap": "Emergence is discussed in every field but with incompatible definitions and without cross-domain unification. Physicists use RG to make \"emergence\" precise; philosophers debate strong vs. weak emergence; biologists discuss emergence qualitatively; computer scientists study emergence in cellular automata; social scientists use emergence loosely for anything collective. Anderson's 1972 Science paper is cited across all fields but its mathematical content (RG as the theory of emergence) is rarely followed through. IIT has been adopted by some neuroscientists but rejected by many consciousness researchers as untestable; it has not penetrated physics or social science at all.\n",
      "translation_table": [
        {
          "field_a_term": "RG fixed point (theory space)",
          "field_b_term": "emergent law valid at macroscopic scale (e.g. fluid dynamics from atomic collisions)",
          "note": "Fixed point = the effective theory that describes the coarse-grained system; Navier-Stokes is a fixed point of molecular dynamics"
        },
        {
          "field_a_term": "order parameter (e.g. magnetisation M, condensate ψ)",
          "field_b_term": "emergent collective variable (e.g. temperature, pressure, price)",
          "note": "Order parameters do not exist at the microscopic level; they appear only via coarse-graining"
        },
        {
          "field_a_term": "universality class (same RG fixed point)",
          "field_b_term": "different systems obeying identical macroscopic laws",
          "note": "Liquid-gas = Ising = polymer chain collapse: same critical exponents, different microscopic physics"
        },
        {
          "field_a_term": "Tononi Φ (integrated information)",
          "field_b_term": "quantitative measure of emergence / consciousness",
          "note": "Φ > 0 requires causal integration; Φ = 0 for any feedforward or fully disconnected system"
        },
        {
          "field_a_term": "Simon's hierarchical architecture (nearly decomposable systems)",
          "field_b_term": "modular emergence — complexity builds in layers (atoms → molecules → cells → organisms)",
          "note": "Near-decomposability enables evolution to modify one level without disrupting others"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.177.4047.393",
          "note": "Anderson (1972) More is different. Science 177:393"
        },
        {
          "note": "Holland (1998) Emergence: From Chaos to Order. Addison-Wesley"
        },
        {
          "doi": "10.1186/1471-2202-5-42",
          "note": "Tononi (2004) An information integration theory of consciousness. BMC Neurosci 5:42"
        },
        {
          "note": "Simon (1962) The architecture of complexity. Proc Am Philos Soc 106:467"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/all-domains/b-complex-systems-emergence.yaml"
    },
    {
      "id": "b-scientific-method-epistemological-foundations",
      "title": "The scientific method is a cross-domain bridge in itself: Popper's falsificationism, Kuhn's paradigm shifts, Lakatos's research programmes, and Bayesian confirmation theory are competing but complementary formalisms that all fields use to distinguish knowledge from belief — and USDR bridges are explicit falsifiable predictions about structural analogies between disciplines.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The scientific method is itself a meta-bridge connecting all empirical disciplines through a shared epistemological infrastructure. Popper's falsificationism holds that a claim is scientific if and only if it makes predictions that could be refuted by observation — no finite evidence confirms a univ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-scientific-method-bridges-as-falsifiable-predictions"
      ],
      "communication_gap": "Philosophy of science is rarely taught to working scientists beyond an introductory course; Popper, Kuhn, and Lakatos are known names but their technical frameworks are not operationalised in day-to-day research. Bayesian reasoning is standard in statistics and machine learning but seldom applied explicitly to the meta-question of how confident we should be in theoretical frameworks. Cross-disciplinary science lacks a shared epistemological vocabulary — USDR aims to provide one.\n",
      "translation_table": [
        {
          "field_a_term": "Popper — falsifiable prediction",
          "field_b_term": "USDR bridge claim (structural analogy asserting specific mappings)",
          "note": "A bridge is refuted if the mapping breaks down under careful scrutiny"
        },
        {
          "field_a_term": "Kuhn — paradigm",
          "field_b_term": "disciplinary silo (shared methods, journals, training, terminology)",
          "note": "Cross-domain bridges are precisely what paradigms prevent by restricting scientific vision"
        },
        {
          "field_a_term": "Lakatos — progressive research programme",
          "field_b_term": "a bridge that generates novel cross-domain predictions confirmed by experiment",
          "note": "Bridges that spawn new experimental programmes (e.g. statistical mechanics → information theory) are progressive"
        },
        {
          "field_a_term": "Duhem-Quine — auxiliary hypothesis adjustment",
          "field_b_term": "rescoping a failed bridge to a narrower domain of validity",
          "note": "Analogies that fail in full generality may hold approximately or in limiting cases"
        },
        {
          "field_a_term": "Bayesian updating — P(H|E) ∝ P(E|H)·P(H)",
          "field_b_term": "accumulating cross-domain validation evidence for a bridge",
          "note": "Each independent field that confirms the same mathematical structure increases posterior probability"
        }
      ],
      "references": [
        {
          "note": "Popper, K.R. (1959) The Logic of Scientific Discovery. Hutchinson, London."
        },
        {
          "note": "Kuhn, T.S. (1962) The Structure of Scientific Revolutions. University of Chicago Press."
        },
        {
          "note": "Lakatos, I. (1978) The Methodology of Scientific Research Programmes. Cambridge University Press."
        },
        {
          "note": "Jaynes, E.T. (2003) Probability Theory: The Logic of Science. Cambridge University Press."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/all-domains/b-scientific-method-epistemological-foundations.yaml"
    },
    {
      "id": "b-standard-model-unity-physics",
      "title": "The Standard Model SU(3)×SU(2)×U(1) is the most precisely tested scientific theory — its gauge symmetry framework unifies three fundamental forces while explicitly marking what it excludes as the frontier of all physics",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Standard Model of particle physics unifies three fundamental forces through gauge symmetry groups: U(1) electromagnetic (QED, photon), SU(2) weak force (W±, Z bosons, electroweak unification — Glashow-Salam-Weinberg, Nobel 1979), and SU(3) strong force (QCD, gluons, quark confinement, asymptotic",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-supersymmetry-electroweak-hierarchy-stabilization"
      ],
      "communication_gap": "Particle physicists and condensed matter physicists share the same mathematical tools (gauge theory, renormalization group) but publish in separate journals (Physical Review Letters vs. Physical Review B) and attend separate conferences (APS April Meeting vs. APS March Meeting). The cross-domain relevance of the Standard Model to chemistry, biology, and cosmology is understood by specialists but not integrated into curricula across fields. Most chemists and biologists are unaware that their domain is shaped by the symmetry structure of the Standard Model.\n",
      "translation_table": [
        {
          "field_a_term": "gauge symmetry group SU(3)×SU(2)×U(1) (particle physics)",
          "field_b_term": "symmetry-breaking order parameter (condensed matter / chemistry)",
          "note": "The Higgs mechanism is formally identical to the Ginzburg-Landau theory of superconductivity"
        },
        {
          "field_a_term": "renormalization group (quantum field theory)",
          "field_b_term": "universality classes in phase transitions (statistical mechanics)",
          "note": "The same RG framework that renormalizes QED divergences explains critical exponent universality"
        },
        {
          "field_a_term": "asymptotic freedom (QCD, high energy)",
          "field_b_term": "effective coupling decreasing at short distances (chemistry)",
          "note": "QCD coupling decreases at high energy — the quark-gluon plasma at the LHC corresponds to a deconfined phase"
        },
        {
          "field_a_term": "parity violation (weak force, particle physics)",
          "field_b_term": "chiral chemistry of biomolecules (biochemistry)",
          "note": "L-amino acids and D-sugars in life may trace to CP violation in the early universe — a contested bridge"
        },
        {
          "field_a_term": "Feynman diagram (QFT perturbation theory)",
          "field_b_term": "cluster expansion / diagrammatic Monte Carlo (condensed matter)",
          "note": "Diagrammatic expansion techniques developed for QFT are used in condensed matter for electron correlation"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0029-5582(61)90469-2",
          "note": "Glashow (1961). Partial symmetries of weak interactions. Nucl Phys 22:579."
        },
        {
          "doi": "10.1103/PhysRevLett.19.1264",
          "note": "Weinberg (1967). A model of leptons. Phys Rev Lett 19:1264."
        },
        {
          "doi": "10.1103/PhysRevLett.30.1343",
          "note": "Gross & Wilczek (1973). Ultraviolet behavior of non-abelian gauge theories. Phys Rev Lett 30:1343."
        },
        {
          "doi": "10.1016/j.physletb.2012.08.020",
          "note": "Aad et al. (ATLAS Collaboration) (2012). Observation of a new particle in the search for the Standard Model Higgs boson. Phys Lett B 716:1."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/all-domains/b-standard-model-unity-physics.yaml"
    },
    {
      "id": "b-cultural-group-selection-multilevel-theory",
      "title": "Cultural evolution drives human ultrasociality through group-level selection acting on culturally transmitted norms and institutions: multilevel selection theory (MLS) formalises this as Price equation decomposition into within-group and between-group fitness components, making evolutionary biology the quantitative framework for cultural anthropology of cooperation.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "contested",
      "bridge_claim": "Human large-scale cooperation (states, markets, armies) exceeds what kin selection and direct reciprocity can explain. Cultural group selection (CGS) proposes that groups with cooperation-enforcing norms outcompete groups without them, and that cultural transmission (imitation, teaching) enables rap",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Evolutionary biologists debate MLS vs. inclusive fitness on technical grounds; anthropologists use group selection narratively without engaging with Price equation formalism. The empirical question of whether human warfare and institutional competition generate sufficient between-group selection for CGS requires joint analysis rarely performed.\n",
      "translation_table": [
        {
          "field_a_term": "Price equation between-group covariance (evolutionary biology)",
          "field_b_term": "cultural selection on group-level norms / institutions (anthropology)",
          "note": "Between-group Cov(w_g, z_g) > 0 requires variance in cultural traits AND differential group fitness"
        },
        {
          "field_a_term": "cultural transmission fidelity (evolutionary biology)",
          "field_b_term": "vertical/horizontal norm transmission through imitation and teaching (anthropology)",
          "note": "High-fidelity cultural transmission maintains between-group variance required for CGS"
        },
        {
          "field_a_term": "group-level adaptation (evolutionary biology)",
          "field_b_term": "prosocial institutions (legal systems, markets, religions) (anthropology)",
          "note": "MLS predicts institutions evolve when they increase group fitness despite individual costs"
        },
        {
          "field_a_term": "within-group selection (defection advantage) (evolutionary biology)",
          "field_b_term": "free-rider problem / social dilemma within cultural groups (anthropology)",
          "note": "CGS is effective only when between-group selection exceeds within-group defection advantage"
        }
      ],
      "references": [
        {
          "doi": "10.1017/S0140525X00031551",
          "note": "Wilson & Sober (1994) - reintroducing group selection to the human behavioral sciences"
        },
        {
          "doi": "10.1016/j.jhevol.2009.01.003",
          "note": "Turchin (2006) - war and peace and war; cultural group selection in history"
        },
        {
          "doi": "10.1073/pnas.1007863107",
          "note": "Henrich et al. (2010) - markets, religion, community size, and the evolution of fairness"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/anthropology-evolutionary-biology/b-cultural-group-selection-multilevel.yaml"
    },
    {
      "id": "b-aesthetic-complexity-information",
      "title": "Aesthetic preference correlates with intermediate algorithmic complexity: Birkhoff's measure M = O/C, Kolmogorov complexity, and fractal dimension operationalise the information-theoretic \"sweet spot\" between randomness and repetition, unifying aesthetics with mathematics and cognitive science.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Birkhoff (1933) defined aesthetic measure as M = O/C — order divided by complexity. High order with low complexity (a single constant tone, a uniform colour field) has M → ∞ but is perceived as boring. High complexity with low order (white noise, random pigment splatter) has M → 0 but is perceived a",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-birkhoff-kolmogorov-aesthetic-sweet-spot"
      ],
      "communication_gap": "Birkhoff published in aesthetics (Harvard University Press, 1933); Kolmogorov published in information theory (Problems of Information Transmission, 1965); Taylor et al. published in Nature (1999); Schmidhuber published in IEEE Transactions (2010); Huron published in music cognition (MIT Press, 2006). Each community developed the same core idea — intermediate complexity maximises aesthetic response — independently and without systematic cross-citation. Aesthetics and information theory have not been bridged because aesthetics has lacked quantitative methods and information theory has lacked access to aesthetic response data. The fractal work (Taylor et al.) is the most widely cited bridge but is still absent from standard art theory curricula.\n",
      "translation_table": [
        {
          "field_a_term": "Birkhoff aesthetic measure M = O/C (order/complexity)",
          "field_b_term": "Compression ratio (description length before / after coding)",
          "note": "High O/C means the pattern is highly compressible = structured"
        },
        {
          "field_a_term": "Kolmogorov complexity K(x)",
          "field_b_term": "Minimum description length of artwork / musical piece",
          "note": "K(x) ≈ 0 for repetition; K(x) ≈ log N for random noise"
        },
        {
          "field_a_term": "Fractal dimension D of visual pattern",
          "field_b_term": "Self-similarity at multiple scales (scale-free structure)",
          "note": "D measures how much complexity is added as resolution increases"
        },
        {
          "field_a_term": "Musical tension / resolution",
          "field_b_term": "Prediction error / surprise H(next note | context)",
          "note": "Shannon entropy of conditional note distribution = musical unpredictability"
        },
        {
          "field_a_term": "ITPRA theory (Huron 2006) — Imagination-Tension-Prediction-Reaction-Appraisal",
          "field_b_term": "Predictive coding in auditory cortex (hierarchical Bayesian inference)",
          "note": "Musical emotion = cortical prediction error signal; same as perception generally"
        },
        {
          "field_a_term": "Aesthetic sweet spot (intermediate preference)",
          "field_b_term": "Optimal complexity for compression learning (Schmidhuber 2010)",
          "note": "Pleasure = rate of compression progress; peaks at intermediate algorithmic complexity"
        },
        {
          "field_a_term": "Natural scene statistics (1/f power spectrum, fractal D ≈ 1.3–1.5)",
          "field_b_term": "Calibration target for aesthetic preference",
          "note": "Preference for D ≈ 1.3–1.5 is adaptive: matches natural environment statistics"
        }
      ],
      "references": [
        {
          "note": "Birkhoff (1933) Aesthetic Measure (Harvard University Press) — original M = O/C formulation"
        },
        {
          "doi": "10.1038/20833",
          "note": "Taylor, Micolich & Jonas (1999) Nature 399:422 — fractal analysis of Pollock drip paintings"
        },
        {
          "note": "Huron (2006) Sweet Anticipation: Music and the Psychology of Expectation (MIT Press)"
        },
        {
          "doi": "10.1109/TAMD.2010.2059346",
          "note": "Schmidhuber (2010) IEEE Trans Aut Mental Dev 2:230 — formal theory of creativity and curiosity via compression progress"
        },
        {
          "note": "Meyer (1956) Emotion and Meaning in Music (University of Chicago Press)"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/art-and-cognition-mathematics/b-aesthetic-complexity-information.yaml"
    },
    {
      "id": "b-mirror-neurons-aesthetic-empathy",
      "title": "Mirror neurons fire both when executing an action and when observing another perform it — providing the neural substrate for motor empathy, aesthetic experience, and imitation learning, with direct implications for understanding the uncanny valley, embodied simulation in art viewing, and the neural basis of social cognition.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Rizzolatti et al. (1996) discovered \"mirror neurons\" in macaque premotor cortex (area F5) that fire both when the monkey executes a specific hand action (grasping) and when it observes another individual performing the same action. This \"mirror property\" — the neuron reflects another's action in its",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-mirror-neuron-dance-therapy"
      ],
      "communication_gap": "Mirror neurons were discovered in basic primate neuroscience (Rizzolatti et al. 1996) and their aesthetic implications were proposed by Gallese & Freedberg (2007) in a Trends in Cognitive Sciences paper. The art history and aesthetics literature has engaged with mirror neuron theory (Freedberg & Gallese started a \"neuroaesthetics\" movement) but with controversy about the translation from animal to human neuroscience and from neuroscience to art criticism. The strong claims about autism (\"broken mirror\" theory) have damaged the field's reputation by overgeneralising from limited evidence. The legitimate core insight (that aesthetic experience involves motor simulation) needs more rigorous experimental separation from speculative extensions.\n",
      "translation_table": [
        {
          "field_a_term": "Mirror neuron firing during action observation",
          "field_b_term": "Neural substrate of aesthetic empathy and motor resonance",
          "note": "The same cortical circuit that plans an action also activates when that action is perceived or imagined"
        },
        {
          "field_a_term": "Motor simulation",
          "field_b_term": "Embodied aesthetic response — feeling a painting's movement or music's groove",
          "note": "Gallese's 'embodied simulation': art activates action/emotion circuits, not just perceptual cortex"
        },
        {
          "field_a_term": "Prediction error (mirror mismatch)",
          "field_b_term": "Uncanny valley aversion — discomfort at near-human but imperfect human motion",
          "note": "Predictive processing interpretation: high prediction error for sub-human biological motion patterns"
        },
        {
          "field_a_term": "Canonical and mirror neurons together",
          "field_b_term": "Object-affordance perception and action understanding",
          "note": "Canonical neurons fire to object shape (cup = graspable); mirror neurons fire to observed grasping action"
        },
        {
          "field_a_term": "Mu rhythm suppression (8-13 Hz EEG)",
          "field_b_term": "Human proxy measure for mirror neuron activity",
          "note": "Mu suppression during action observation = desynchronization of motor cortex; indirect mirror neuron marker"
        },
        {
          "field_a_term": "Broken mirror theory (autism)",
          "field_b_term": "Hypothesis that autism spectrum disorder involves mirror neuron dysfunction",
          "note": "Proposed by Ramachandran; contested by Hickok (2009) and meta-analyses of neuroimaging data"
        },
        {
          "field_a_term": "Language-action simulation",
          "field_b_term": "Mirror-system-mediated comprehension of action language",
          "note": "Understanding 'kick' activates leg motor representations; semantic grounding via motor simulation"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0926-6410(96)00020-8",
          "note": "Rizzolatti et al. (1996) Cogn Brain Res 3:131 — premotor cortex and recognition of motor actions; mirror neuron discovery"
        },
        {
          "doi": "10.1016/j.tics.2007.09.009",
          "note": "Gallese & Freedberg (2007) Trends Cogn Sci 11:197 — mirror and canonical neurons: embodied simulation and aesthetics"
        },
        {
          "note": "Mori (1970) Energy 7:33 — The uncanny valley (bukimi no tani); original Japanese publication",
          "url": "https://spectrum.ieee.org/the-uncanny-valley"
        },
        {
          "note": "Iacoboni (2008) Mirroring People (FSG) — popular account of mirror neuron theory and social cognition",
          "url": "https://us.macmillan.com/books/9780374530884/mirroringpeople"
        },
        {
          "doi": "10.1038/nrn2624",
          "note": "Hickok (2009) Nat Rev Neurosci 10:536 — eight problems for the mirror neuron theory of action understanding"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/art-and-cognition-neuroscience/b-mirror-neurons-aesthetic-empathy.yaml"
    },
    {
      "id": "b-cosmic-rays-mutagenesis",
      "title": "Galactic cosmic ray flux and gamma-ray burst irradiation of Earth's biosphere have varied systematically with the solar system's galactic position, correlating with mass extinction timing and potentially modulating the long-term pace of biological evolution through elevated mutagenesis and DNA double-strand break rates.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The galactic environment of the solar system is not static. As the Sun oscillates through the galactic plane (~33 Myr period) and spirals through spiral arms (~140 Myr period), Earth's exposure to cosmic rays, gamma-ray bursts, and supernova irradiation varies by factors of several.\nCosmic rays are ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-grb-cambrian-explosion-trigger"
      ],
      "communication_gap": "Astrophysicists studying cosmic ray acceleration and GRB populations rarely engage with the paleontological literature on mass extinction mechanisms. Evolutionary biologists and paleontologists rarely cite galactic dynamics papers. The few interdisciplinary papers (Melott, Lineweaver) exist but have not produced a sustained research program connecting galactic astrophysics observatories to fossil record analysis. Geophysicists measure cosmogenic isotopes as a flux proxy but rarely connect these to biological radiation dose rates.\n",
      "translation_table": [
        {
          "field_a_term": "galactic cosmic ray flux (GeV protons, cm^-2 s^-1)",
          "field_b_term": "DNA double-strand break rate (DSBs/cell/day)",
          "note": "Linear dose-response; DSBs scale with secondary muon flux at surface"
        },
        {
          "field_a_term": "spiral arm crossing period (~140 Myr)",
          "field_b_term": "elevated supernova rate and nearby GRB probability",
          "note": "Arms concentrate OB associations; increased ionizing radiation environment"
        },
        {
          "field_a_term": "gamma-ray burst fluence at 1 kpc",
          "field_b_term": "ozone column depletion fraction",
          "note": "~100 kJ/m^2 causes ~50% ozone loss; Melott-Thomas criterion for mass extinction"
        },
        {
          "field_a_term": "solar modulation potential (Phi, MeV)",
          "field_b_term": "suppression of low-energy cosmic ray flux reaching Earth",
          "note": "Active sun reduces surface cosmic ray flux; solar grand minima increase it"
        },
        {
          "field_a_term": "cosmogenic isotope record (^10Be in ice cores, ^14C in tree rings)",
          "field_b_term": "past cosmic ray flux proxy",
          "note": "Ice core and sediment records provide flux history over Myr timescales"
        },
        {
          "field_a_term": "Ordovician gamma-ray burst hypothesis",
          "field_b_term": "preferential extinction of surface/shallow vs. deep-water taxa",
          "note": "UV kill mechanism predicts depth-stratified extinction pattern"
        }
      ],
      "references": [
        {
          "doi": "10.1017/S1473550408004412",
          "note": "Melott & Thomas (2009) - late Ordovician mass extinction and galactic gamma-ray burst"
        },
        {
          "doi": "10.1017/S1473550404001737",
          "note": "Lineweaver et al. (2004) - Galactic Habitable Zone"
        },
        {
          "doi": "10.1086/426926",
          "note": "Melott et al. (2004) - did a gamma-ray burst initiate the late Ordovician mass extinction?"
        },
        {
          "arxiv": "astro-ph/0509785",
          "note": "Dar & De Rujula (2002) - cosmic ray radiation and mass extinctions"
        },
        {
          "doi": "10.1126/science.1153213",
          "note": "Svensmark (2007) - cosmoclimatology: solar activity, cosmic rays, and climate"
        },
        {
          "doi": "10.1016/j.newar.2006.09.005",
          "note": "Thomas & Melott (2006) - gamma-ray bursts and terrestrial planetary atmospheres"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/astronomy-biology/b-cosmic-rays-mutagenesis.yaml"
    },
    {
      "id": "b-stellar-forcing-paleoclimate",
      "title": "Solar variability (Milankovitch orbital cycles, total solar irradiance variations, cosmic ray flux modulation) governs Earth's climate history — the same celestial mechanics and stellar physics that determines exoplanet habitability zones controls Dansgaard-Oeschger events, glacial terminations, and the faint young Sun paradox.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Earth's climate operates on multiple timescales governed by different aspects of solar and orbital physics. Milankovitch theory — the coupling of Earth's orbital eccentricity (100 kyr), axial obliquity (41 kyr), and precession (23 kyr) cycles to insolation distribution — accurately predicts the timi",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-amoc-fold-bifurcation-ew",
        "h-permafrost-carbon-tipping-2point5"
      ],
      "communication_gap": "Astronomy and paleoclimatology share the Milankovitch framework but otherwise operate in separate communities. Solar physicists and stellar astronomers rarely read paleoclimate journals (Nature Climate Change, Paleoceanography) and paleoclimatologists rarely read solar physics journals (Solar Physics, ApJ). The faint young Sun paradox sits explicitly at the intersection but has been addressed by each community with different tools and assumptions without a unified treatment. Exoplanet habitability researchers use Earth climate models as external inputs without integrating paleoclimate data into their uncertainty quantification. Improved interdisciplinary training and joint workshops would enable cross-validation of models across disciplines.\n",
      "translation_table": [
        {
          "field_a_term": "orbital eccentricity (Milankovitch 100 kyr cycle)",
          "field_b_term": "glacial-interglacial cycling; pacemaker of ice ages in delta-O18 record"
        },
        {
          "field_a_term": "total solar irradiance (TSI) variation",
          "field_b_term": "global mean temperature forcing; ~0.1 W/m2 per 11-year cycle"
        },
        {
          "field_a_term": "UV flux variation (stellar chromospheric activity)",
          "field_b_term": "stratospheric ozone response; top-down climate pathway via QBO and circulation"
        },
        {
          "field_a_term": "stellar evolution (main sequence luminosity increase)",
          "field_b_term": "faint young Sun paradox; Archaean climate sensitivity constraints"
        },
        {
          "field_a_term": "cosmic ray flux (galactic + solar modulation)",
          "field_b_term": "cloud condensation nuclei; Svensmark hypothesis for solar-climate link via ionization"
        },
        {
          "field_a_term": "habitable zone calculation (stellar luminosity + climate model)",
          "field_b_term": "Earth's climate sensitivity calibrates HZ boundaries used for exoplanet prioritization"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.194.4270.1121",
          "note": "Hays et al. (1976) — Milankovitch cycles confirmed in deep-sea sediment record"
        },
        {
          "doi": "10.1038/nature10385",
          "note": "Laskar et al. (2011) — strong chaos in the solar system orbital solution"
        },
        {
          "doi": "10.1073/pnas.1117601109",
          "note": "Feulner (2012) — faint young Sun paradox review; stellar evolution constraints"
        },
        {
          "doi": "10.1007/s11214-014-0059-x",
          "note": "Gray et al. (2010) — solar influences on climate; TSI, UV, and cosmic ray mechanisms"
        },
        {
          "doi": "10.1038/352412a0",
          "note": "Berger & Loutre (1991) — insolation values for the climate of the last 10 million years"
        },
        {
          "doi": "10.1007/s00585-997-0381-x",
          "note": "Svensmark & Friis-Christensen (1997) — variation of cosmic ray flux and global cloud coverage"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/astronomy-climate/b-stellar-forcing-paleoclimate.yaml"
    },
    {
      "id": "b-dark-matter-substructure-x-halo-merger-tree-algorithms",
      "title": "Cold dark matter predicts hierarchical assembly: small halos form early and later merge into larger hosts — a process represented computationally by halo merger trees built from N-body simulations using recursive linking algorithms (friends-of-friends, SUBFIND-like catalogs, merger-tree builders), drawing qualitative analogies to tree data structures in algorithms despite radically different physics and noise models.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Simulation post-processing tracks bound substructures across snapshots, assigning parent–child merge events with heuristic linking rules and uncertainty when disruptive tidal stripping fragments identification. Algorithmically, this resembles maintaining dynamic forest representations under incremen",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-merger-tree-branching-matches-subhalo-statistics-scaling"
      ],
      "communication_gap": "Cosmology codes emphasize gravity-only clustering statistics while algorithms courses emphasize worst-case complexity — cross-training rarely compares merger-tree builder failure modes with classical DS&A robustness tests.\n",
      "translation_table": [
        {
          "field_a_term": "halo merger event (host accretes satellite)",
          "field_b_term": "tree edge insertion / union linking record",
          "note": "Event semantics roughly align at lineage-graph level only."
        },
        {
          "field_a_term": "subhalo tracking ambiguity across snapshots (flybys vs mergers)",
          "field_b_term": "aliasing / pointer invalidation under adversarial updates",
          "note": "Shared engineering worry — robust definitions vs corner cases."
        },
        {
          "field_a_term": "mass threshold + linking length in group finders",
          "field_b_term": "resolution scales / epsilon-neighborhood parameters in clustering",
          "note": "Parameter sensitivity analogies — domain definitions differ."
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature03597",
          "note": "Springel et al. (2005) — Millennium Simulation hierarchical clustering of galaxies and dark matter (Nature)."
        },
        {
          "doi": "10.1093/mnras/stu2039",
          "note": "Lee et al. (2014) — Sussing Merger Trees: impact of merger-tree algorithm choice on galaxy properties (MNRAS)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/astronomy-computer-science/b-dark-matter-substructure-x-halo-merger-tree-algorithms.yaml"
    },
    {
      "id": "b-doppler-redshift-x-option-adjusted-carry",
      "title": "Cosmological redshift and line-of-sight Doppler shifts ↔ option-adjusted carry and curve positioning in fixed-income markets (astronomy ↔ finance; speculative analogy)\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Both settings attach a signed shift to observed “prices” along a line of sight: redshift z maps photon energy to recession velocity in the radial direction, while option-adjusted spread and carry metrics encode expected return components after subtracting embedded optionality from raw yields. The br",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-doppler-carry-yield-curve-steepness-speculative-parallels"
      ],
      "communication_gap": "Speculation-heavy. Cosmologists publish error bars tied to physical models; fixed-income desks treat proprietary adjustments as competitive advantage. Without shared falsifiable predictions, cross-talk stays metaphorical. The repository records the analogy explicitly to avoid accidental reification.\n",
      "translation_table": [
        {
          "field_a_term": "observed redshift z (spectral line ratios)",
          "field_b_term": "raw yield spread before option-adjustment",
          "note": "Both are “headline” shifts that can mislead if confounders are ignored."
        },
        {
          "field_a_term": "peculiar velocity correction along LOS",
          "field_b_term": "idiosyncratic issuer risk and technical factors in bond pricing",
          "note": "Residual after removing smooth background trend."
        },
        {
          "field_a_term": "luminosity distance–redshift relation",
          "field_b_term": "yield curve as function of tenor",
          "note": "Shape diagnostics differ; only a loose structural analogy."
        }
      ],
      "references": [
        {
          "doi": "10.1086/159674",
          "note": "Hubble (1929) — empirical distance–redshift relation (historical anchor for redshift language)."
        },
        {
          "doi": "10.1086/260062",
          "note": "Black & Scholes (1973) — option pricing framework behind OAS-style adjustments (finance anchor)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/astronomy-economics/b-doppler-redshift-x-option-adjusted-carry.yaml"
    },
    {
      "id": "b-planetary-rings-viscous-accretion-disk",
      "title": "Saturn's rings and protoplanetary accretion disks obey the same viscous spreading equation: both are Keplerian disk systems where angular-momentum transport by viscosity (collisional in rings, turbulent in disks) determines radial evolution, making ring dynamics a laboratory-scale test-bed for protoplanetary disk physics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The viscous evolution of a Keplerian disk is governed by the diffusion equation: d_Sigma/d_t = (3/r) d/dr [r^{1/2} d/dr (nu Sigma r^{1/2})], where Sigma is surface density and nu is kinematic viscosity. This equation applies equally to Saturn's B ring (nu ~ 10-100 cm^2/s from inter-particle collisio",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Planetary ring dynamicists and protoplanetary disk theorists publish in separate journals (Icarus vs. ApJ) and use different vocabulary (collisional viscosity vs. alpha-disk turbulent viscosity) for the same mathematical object, despite sharing the viscous disk equation.\n",
      "translation_table": [
        {
          "field_a_term": "kinematic viscosity nu (fluid mechanics)",
          "field_b_term": "ring particle collision rate * mean-free path^2 (astronomy)",
          "note": "Rings have nu ~ 10-100 cm^2/s; protoplanetary disks have alpha-disk nu ~ 10^14-10^16 cm^2/s"
        },
        {
          "field_a_term": "viscous spreading / angular momentum diffusion (fluid mechanics)",
          "field_b_term": "radial spreading of ring edges over ~10^8 yr (astronomy)",
          "note": "Cassini measurements constrain ring age and origin via viscous spreading timescale"
        },
        {
          "field_a_term": "Lindblad resonance spiral density wave (fluid mechanics)",
          "field_b_term": "bending and density waves in Saturn's A ring from moon resonances (astronomy)",
          "note": "Spiral density wave pattern provides most accurate measurement of ring surface density"
        },
        {
          "field_a_term": "gap opening criterion (q_planet / alpha > few) (fluid mechanics)",
          "field_b_term": "Cassini Division and Encke Gap opened by Mimas and Pan (astronomy)",
          "note": "Same dimensionless criterion as planet-in-disk gap opening"
        }
      ],
      "references": [
        {
          "doi": "10.1051/0004-6361/201219240",
          "note": "Tiscareno et al. (2013) - structural kinematic and photometric properties of Saturn's rings from Cassini"
        },
        {
          "doi": "10.1086/152434",
          "note": "Lynden-Bell & Pringle (1974) - evolution of viscous disks and the origin of nebular variables"
        },
        {
          "doi": "10.1016/j.icarus.2007.02.013",
          "note": "Colwell et al. (2009) - structure and dynamics of Saturn's rings"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/astronomy-fluid-mechanics/b-planetary-rings-viscous-accretion-disk.yaml"
    },
    {
      "id": "b-blackhole-information-paradox",
      "title": "The black hole information paradox is an information-theoretic crisis: whether quantum gravity destroys von Neumann entropy is equivalent to whether the black hole acts as a quantum channel with zero capacity, and the holographic principle (AdS/CFT) resolves this by identifying bulk gravity with a boundary quantum error-correcting code.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Hawking's 1974 calculation showed that black holes radiate thermally, apparently destroying the quantum information contained in infalling matter. This is the information paradox: unitary quantum mechanics forbids information destruction, yet semiclassical gravity predicts it.\nThe bridge to informat",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-holographic-encoding-hawking-radiation"
      ],
      "communication_gap": "The astrophysics literature treats black hole thermodynamics as a property of spacetime geometry. The quantum information literature treats the information paradox as a problem in quantum channel theory. Despite Susskind's \"black hole wars\" bridging efforts and the 2019 island formula breakthrough, most astrophysicists remain unaware of quantum error correction results that directly constrain black hole microphysics, and most quantum information theorists are unaware that the black hole geometry imposes specific constraints on decoding complexity.\n",
      "translation_table": [
        {
          "field_a_term": "Bekenstein-Hawking entropy S = A/4",
          "field_b_term": "von Neumann entropy S = -Tr(rho log rho)",
          "note": "Formally identical; both measure missing information about the microstate"
        },
        {
          "field_a_term": "Hawking radiation",
          "field_b_term": "output of a quantum channel",
          "note": "The question of whether the channel has positive quantum capacity is the information paradox"
        },
        {
          "field_a_term": "Page curve (entropy of radiation vs. time)",
          "field_b_term": "quantum channel capacity trajectory",
          "note": "Page curve rising then falling = information recovery = positive quantum capacity"
        },
        {
          "field_a_term": "Island formula / replica wormholes",
          "field_b_term": "quantum error-correcting code decoding",
          "note": "Replica wormholes are the gravitational saddle points that implement the decoding"
        },
        {
          "field_a_term": "black hole scrambling time ~ M log M (Planck units)",
          "field_b_term": "quantum information scrambling (out-of-time-order correlators)",
          "note": "Black holes are conjectured to be the fastest scramblers in nature (Sekino-Susskind)"
        },
        {
          "field_a_term": "AdS/CFT holographic duality",
          "field_b_term": "quantum error-correcting code (bulk = logical, boundary = physical)",
          "note": "Almheiri-Dong-Harlow 2015 made this identification explicit and precise"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevD.14.2460",
          "note": "Hawking (1976) - breakdown of predictability in gravitational collapse; the paradox stated"
        },
        {
          "arxiv": "hep-th/9306083",
          "note": "Page (1993) - information in black hole radiation; the Page curve"
        },
        {
          "arxiv": "0708.4025",
          "note": "Hayden & Preskill (2007) - black holes as mirrors; fast scrambling"
        },
        {
          "arxiv": "1411.7041",
          "note": "Almheiri, Dong & Harlow (2015) - bulk locality and quantum error correction in AdS/CFT"
        },
        {
          "arxiv": "1905.08762",
          "note": "Penington (2019) - entanglement wedge reconstruction and the information paradox"
        },
        {
          "arxiv": "1911.11977",
          "note": "Almheiri et al. (2019) - island formula and replica wormholes"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/astronomy-information/b-blackhole-information-paradox.yaml"
    },
    {
      "id": "b-neural-operator-x-space-weather-data-assimilation",
      "title": "Neural operators for plasma dynamics bridge operator learning and space-weather data assimilation workflows.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy (to be empirically validated): Neural-operator surrogates for coupled plasma dynamics can be integrated into sequential data-assimilation loops similarly to reduced-order forecast operators used in operational space-weather pipelines.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-neural-operator-assimilation-improves-space-weather-lead-time"
      ],
      "communication_gap": "Domain operators prioritize interpretable reliability diagnostics, while ML work often prioritizes aggregate accuracy without deployment-grade uncertainty audits.",
      "translation_table": [
        {
          "field_a_term": "model prior",
          "field_b_term": "domain prior",
          "note": "Both constrain inference in data-sparse regimes."
        },
        {
          "field_a_term": "uncertainty estimate",
          "field_b_term": "risk-aware decision support",
          "note": "Uncertainty quality determines practical utility."
        },
        {
          "field_a_term": "out-of-distribution behavior",
          "field_b_term": "deployment robustness",
          "note": "Shift sensitivity governs real-world reliability."
        }
      ],
      "references": [
        {
          "arxiv": "2010.08895",
          "note": "Fourier Neural Operator baseline."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/astronomy-machine-learning/b-neural-operator-x-space-weather-data-assimilation.yaml"
    },
    {
      "id": "b-celestial-mechanics-kam-chaos",
      "title": "The long-term stability of planetary orbits is determined by the Kolmogorov-Arnold-Moser (KAM) theorem: quasi-periodic orbits persist on invariant tori in phase space provided the perturbation is small and the frequency ratio is sufficiently irrational (Diophantine condition), while resonant orbits are destroyed, leading to the chaotic diffusion observed in the asteroid belt and in Laskar's numerical simulations of the inner solar system.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Classical celestial mechanics (Laplace, Lagrange) proved orbital stability to first order in planetary mass ratios. KAM theory (Kolmogorov 1954, Arnold 1963, Moser 1962) proved that nearly-integrable Hamiltonian systems (like the solar system in the small-mass limit) possess a positive-measure set o",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-mercury-orbit-chaotic-diffusion-eccentricity"
      ],
      "communication_gap": "KAM theory is one of the deepest results in 20th century mathematics (Arnol'd published in Russian in 1963; English translations lagged by years). Celestial mechanicians adopted it slowly; Laskar's chaotic solar system simulations (1989) were the first widely accessible demonstration of its implications. Applied orbital mechanics and astrodynamics engineers rarely engage with the full KAM mathematical formalism.\n",
      "translation_table": [
        {
          "field_a_term": "Planetary orbital frequency ratio ω_i/ω_j",
          "field_b_term": "Frequency ratio of the KAM torus",
          "note": "Irrational (Diophantine) ratios → stable KAM torus; rational ratios → resonance → chaos"
        },
        {
          "field_a_term": "Mean-motion resonance (e.g., Jupiter-Saturn 5:2)",
          "field_b_term": "Destroyed KAM torus (resonance overlap / Chirikov criterion)",
          "note": "Resonance overlaps destroy intervening tori; Kirkwood gaps in asteroid belt are cleared resonances"
        },
        {
          "field_a_term": "Lyapunov exponent of Mercury's orbit (τ ~ 5 Myr)",
          "field_b_term": "Inverse of the Lyapunov time for a chaotic trajectory in phase space",
          "note": "Positive Lyapunov exponent indicates exponential sensitivity to initial conditions"
        },
        {
          "field_a_term": "Action-angle variables (J, θ) of integrable Kepler problem",
          "field_b_term": "KAM torus coordinates in the nearly-integrable perturbation",
          "note": "KAM theorem guarantees persistence of tori in (J, θ) space under small perturbation H = H₀ + εH₁"
        }
      ],
      "references": [
        {
          "doi": "10.1038/338237a0",
          "note": "Laskar (1989) Nature – numerical evidence for chaotic behavior of the inner solar system; Lyapunov exponents"
        },
        {
          "doi": "10.1070/RM1963v018n05ABEH004130",
          "note": "Arnold (1963) – proof of KAM theorem for analytic Hamiltonians (Russian Math. Surveys)"
        },
        {
          "doi": "10.1086/588006",
          "note": "Laskar & Gastineau (2009) Nature – existence of collisional trajectories of Mercury in 1% of integrations"
        },
        {
          "doi": "10.1007/s10569-008-9179-4",
          "note": "Murray & Holman – origin of chaos in the outer solar system; resonance overlap criterion"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/astronomy-mathematics/b-celestial-mechanics-kam-chaos.yaml"
    },
    {
      "id": "b-exoplanet-spectral-retrieval-bayesian",
      "title": "Exoplanet atmospheric composition is inferred by Bayesian spectral retrieval: the posterior P(θ|d) over temperature-pressure profile and molecular abundances is sampled via nested sampling or MCMC",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Atmospheric retrieval solves the inverse problem: given a transit or emission spectrum d (flux vs. wavelength) observed by HST/JWST, infer the atmospheric state vector θ = {T(P), X_H₂O, X_CO₂, X_CH₄, ...} (temperature-pressure profile and molecular abundances). The forward model M(θ) computes the sy",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-exoplanet-spectral-retrieval-bayesian"
      ],
      "communication_gap": "Astronomers who perform retrieval analyses use MultiNest or emcee as black-box samplers without typically engaging with the broader statistical literature on high-dimensional Bayesian inversion, regularization, and identifiability. The same inverse problem structure appears in geophysical tomography and medical imaging CT reconstruction, and insights from those fields (compressed sensing, total variation regularization) are underexplored in atmospheric retrieval.\n",
      "translation_table": [
        {
          "field_a_term": "transit depth spectrum F_in(λ)/F_out(λ)",
          "field_b_term": "data vector d in the Bayesian likelihood L(d|θ)",
          "note": "JWST provides ~100-1000 spectral channels; each wavelength is one datum"
        },
        {
          "field_a_term": "T-P profile T(P) (temperature as function of pressure)",
          "field_b_term": "high-dimensional parameter subspace θ_T requiring regularization",
          "note": "Typical parameterization: 5-10 nodes with monotonicity prior to prevent non-physical profiles"
        },
        {
          "field_a_term": "molecular abundance X_i (volume mixing ratio)",
          "field_b_term": "model parameter with log-uniform prior over ~8 orders of magnitude",
          "note": "Log-uniform prior reflects prior ignorance over orders-of-magnitude range"
        },
        {
          "field_a_term": "nested sampling evidence Z = ∫L(θ)π(θ)dθ",
          "field_b_term": "Bayesian model evidence for model comparison (clear vs. cloudy atmosphere)",
          "note": "Bayes factor Z₁/Z₂ quantifies evidence for clouds, hazes, or specific molecular detections"
        }
      ],
      "references": [
        {
          "doi": "10.1086/680119",
          "note": "Madhusudhan & Seager (2009) A temperature and abundance retrieval method for exoplanet atmospheres. ApJ 707:24"
        },
        {
          "doi": "10.1093/mnras/stx3021",
          "note": "Buchner et al. (2014) X-ray spectral modelling of the AGN obscuring region. A&A 564:A125"
        },
        {
          "doi": "10.3847/1538-4357/ab48e6",
          "note": "Tsiaras et al. (2019) Water vapour in the atmosphere of the habitable-zone eight-Earth-mass planet K2-18b. Nat Astron 3:1086"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/astronomy-mathematics/b-exoplanet-spectral-retrieval-bayesian.yaml"
    },
    {
      "id": "b-frb-random-matrix",
      "title": "The non-Poissonian, power-law waiting-time statistics of repeating fast radio burst sources share the eigenvalue repulsion and universality-class signatures of random matrix theory (GUE/GOE), suggesting that FRB emission physics is governed by quantum-chaotic dynamics analogous to those seen in nuclear resonances, quantum dots, and classically chaotic billiards.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Fast radio bursts (FRBs) are millisecond-duration radio transients of cosmological origin. Repeating FRB sources (FRB 20121102A, FRB 20201124A, and ~50 others in CHIME/FRB catalogs) exhibit complex temporal clustering: their waiting-time distributions (time between consecutive bursts) are not Poisso",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-frb-gue-universality-magnetar"
      ],
      "communication_gap": "Radio astronomers working on FRB statistics publish in ApJ, MNRAS, and Nature Astronomy and are unfamiliar with the RMT literature (primarily in Annals of Physics, Physical Review Letters, and nuclear physics journals). Mathematicians working on random matrix universality rarely engage with astrophysical time series. The few papers applying RMT-adjacent methods (Weibull fits, self-organized criticality) to FRBs do not invoke RMT universality class language, missing the predictive power that comes with the full mathematical framework.\n",
      "translation_table": [
        {
          "field_a_term": "FRB inter-burst waiting time distribution",
          "field_b_term": "energy level spacing distribution P(s) in RMT",
          "note": "Burst times -> eigenvalues; waiting times -> level spacings"
        },
        {
          "field_a_term": "Weibull distribution of FRB waiting times (k < 1)",
          "field_b_term": "Wigner surmise P(s) ~ s exp(-pi s^2/4) for GOE",
          "note": "Both deviate from exponential (Poisson) in the same direction at short times"
        },
        {
          "field_a_term": "FRB burst clustering / \"active windows\"",
          "field_b_term": "spectral rigidity (Sigma^2) and long-range correlations in RMT",
          "note": "RMT predicts logarithmic long-range correlations; Poisson predicts linear"
        },
        {
          "field_a_term": "magnetar crustal fracture avalanches (SOC)",
          "field_b_term": "quantum chaotic system with many coupled degrees of freedom",
          "note": "Both produce RMT-class statistics; distinguishing them requires the 3-point function"
        },
        {
          "field_a_term": "FRB source physical mechanism (magnetar vs. other)",
          "field_b_term": "RMT symmetry class (GOE vs. GUE vs. GSE)",
          "note": "Time-reversal symmetry of emission mechanism determines universality class"
        },
        {
          "field_a_term": "CHIME/FRB repeater catalog statistics",
          "field_b_term": "large-N RMT ensembles (N -> infinity universality)",
          "note": "Requires ~100 bursts per source for reliable universality class assignment"
        }
      ],
      "references": [
        {
          "doi": "10.1093/mnras/stab3643",
          "note": "Oppermann et al. (2018) - FRB repetition statistics and waiting time distributions"
        },
        {
          "arxiv": "2107.02279",
          "note": "CHIME/FRB Collaboration (2021) - first CHIME/FRB catalog; non-Poissonian statistics"
        },
        {
          "doi": "10.1103/RevModPhys.53.385",
          "note": "Bohigas, Giannoni & Schmit (1984) - classical chaos and RMT universality (BGS conjecture)"
        },
        {
          "doi": "10.1016/S0003-4916(62)90030-6",
          "note": "Wigner (1962) - characteristic vectors of bordered matrices; RMT foundations"
        },
        {
          "arxiv": "2109.11535",
          "note": "Xu et al. (2022) - FRB 20201124A burst statistics showing non-Poissonian clustering"
        },
        {
          "doi": "10.1103/PhysRevLett.52.1",
          "note": "Bohigas, Giannoni & Schmit (1984) - PRL version; RMT universality for quantum chaos"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/astronomy-mathematics/b-frb-random-matrix.yaml"
    },
    {
      "id": "b-helioseismology-x-inverse-eigenvalue-problems",
      "title": "Global helioseismology infers solar interior structure by matching observed eigenfrequencies ω_nl of acoustic modes to stellar oscillation equations — structurally parallel to classical inverse Sturm–Liouville / vibrating-string eigenvalue problems asking which potentials reproduce measured spectra — placing asteroseismology inside inverse spectral geometry narratives taught in applied mathematics departments.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Linear adiabatic oscillation equations yield eigenvalue problems for pressure modes (p-modes) whose eigenfrequencies densely sample interior sound-speed profiles c(r) — analogous to recovering q(x) in −y'' + q(x) y = λ y from spectral data under appropriate transforms (inverse scattering intuition).",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-helioseismology-x-inverse-eigenvalue-problems"
      ],
      "communication_gap": "Solar physics tutorials emphasize observational pipeline details over Fredholm operator notation; inverse-problem mathematicians rarely assign homework using MDI/HMI frequency tables despite pedagogical alignment.\n",
      "translation_table": [
        {
          "field_a_term": "Mode eigenfrequencies ω_nl (helioseismology)",
          "field_b_term": "Spectral sequence λ_n for differential operators with unknown coefficients",
          "note": "Same question — recover coefficients from eigen-spectrum — though solar operators are vectorial and spherically stratified."
        },
        {
          "field_a_term": "Eigenfunctions ξ_nl(r) contributing kernels to frequency shifts",
          "field_b_term": "Mode shapes weighting coefficients in inverse-Sturm–Liouville literature",
          "note": "Fredholm kernels parallel sensitivity functions ∂ω/∂c(r)."
        },
        {
          "field_a_term": "Regularized least-squares sound-speed inversions",
          "field_b_term": "Tikhonov regularization in inverse spectral problems",
          "note": "Shared numerical analysis stacks for ill-posed linearized updates."
        }
      ],
      "references": [
        {
          "doi": "10.1146/annurev.astro.41.1.251",
          "note": "Christensen-Dalsgaard (2003) ARA&A — helioseismology review including oscillation equations"
        },
        {
          "doi": "10.1098/rspa.1978.0057",
          "note": "Gelfand & Levitan tradition inverse spectral citation cluster — mathematical lineage context"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/astronomy-mathematics/b-helioseismology-x-inverse-eigenvalue-problems.yaml"
    },
    {
      "id": "b-baryon-asymmetry-cp-violation",
      "title": "The cosmological matter-antimatter asymmetry (baryon-to-photon ratio eta ~ 6e-10) demands CP-violating physics beyond the Standard Model: the observed CKM CP violation is ten orders of magnitude too small, linking baryogenesis directly to the open problem of CP violation in leptonic and hadronic sectors.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The observed universe contains approximately one baryon per 10^9 photons (eta_B ~ 6e-10, measured by CMB and Big Bang nucleosynthesis). A universe that begins matter-antimatter symmetric cannot arrive at this state without satisfying Sakharov's three conditions (1967): baryon number violation (B), C",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-leptogenesis-sm-cp-insufficient"
      ],
      "communication_gap": "Cosmologists characterise the baryon asymmetry as a number to be explained. Particle physicists study CP violation through precision measurements of meson decays, neutrino oscillations, and electric dipole moments. Despite the direct connection, few experimental collaborations frame their neutrino oscillation results in terms of baryogenesis viability; the communities publish in separate journals (JCAP/ApJ vs. PRL/PRD) and the interdisciplinary connection is made mainly in theory review articles rather than experimental programs.\n",
      "translation_table": [
        {
          "field_a_term": "baryon-to-photon ratio eta_B ~ 6e-10",
          "field_b_term": "CP-violating phase delta_CP in PMNS neutrino mixing matrix",
          "note": "Leptogenesis connects these; measuring delta_CP tests baryogenesis"
        },
        {
          "field_a_term": "Sakharov condition 2 (CP violation)",
          "field_b_term": "CKM/PMNS phase and electric dipole moments",
          "note": "SM CKM CP violation is 10^10 too small; beyond-SM CP violation required"
        },
        {
          "field_a_term": "electroweak sphaleron (B+L violation)",
          "field_b_term": "non-perturbative electroweak topology change",
          "note": "Sphalerons convert lepton to baryon number; freeze out at T_EW ~ 130 GeV"
        },
        {
          "field_a_term": "electroweak phase transition (first-order vs. crossover)",
          "field_b_term": "departure from thermal equilibrium (Sakharov condition 3)",
          "note": "SM has a crossover, not a first-order transition; requires BSM physics for strong EWPT"
        },
        {
          "field_a_term": "heavy Majorana neutrino mass (leptogenesis scale)",
          "field_b_term": "seesaw mechanism for light neutrino mass",
          "note": "m_nu ~ v^2 / M_R; measuring light neutrino masses constrains leptogenesis scale"
        }
      ],
      "references": [
        {
          "doi": "10.1070/PU1991v034n05ABEH002497",
          "note": "Sakharov (1967, translated 1991) - violation of CP invariance, C asymmetry, and baryon asymmetry of the universe"
        },
        {
          "doi": "10.1016/0370-2693(86)91126-3",
          "note": "Fukugita & Yanagida (1986) - baryogenesis without grand unification; leptogenesis proposal"
        },
        {
          "arxiv": "hep-ph/0210271",
          "note": "Davidson & Ibarra (2002) - lower bound on the leptogenesis scale"
        },
        {
          "arxiv": "1710.09838",
          "note": "Bodeker & Buchmuller (2021) - review of baryogenesis from the weak scale"
        },
        {
          "doi": "10.1103/RevModPhys.88.015004",
          "note": "Morrissey & Ramsey-Musolf (2012) - electroweak baryogenesis review"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/astronomy-physics/b-baryon-asymmetry-cp-violation.yaml"
    },
    {
      "id": "b-dark-energy-vacuum-cosmological-constant",
      "title": "The observed cosmological constant Λ ≈ 1.11 × 10⁻⁵² m⁻² driving accelerated cosmic expansion corresponds to a vacuum energy density ρ_Λ = Λc²/(8πG) ≈ 5.4 × 10⁻¹⁰ J/m³, which is ~120 orders of magnitude smaller than the naive quantum-field-theory estimate of zero-point energies — the cosmological constant problem is the largest numerical discrepancy in physics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "contested",
      "bridge_claim": "Einstein introduced Λ as a static-universe term (1917); Perlmutter and Riess (1998/1999) discovered dark energy from supernovae — cosmic expansion is accelerating, requiring a non-zero Λ > 0. The bridge to quantum field theory: the zero-point energy of all quantum fields contributes to the vacuum en",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-dark-energy-quintessence-equation-of-state-variation"
      ],
      "communication_gap": "Cosmologists using Λ as a phenomenological fit parameter and quantum field theorists computing vacuum energies publish in different communities. Weinberg (1989) Reviews of Modern Physics stated the problem cleanly but no solution has emerged in 37 years. The disconnect between observational cosmology, high-energy theory, and gravitational physics is structural and deep.\n",
      "translation_table": [
        {
          "field_a_term": "Cosmological constant Λ (Einstein field equations)",
          "field_b_term": "Vacuum energy density ρ_Λ = Λc²/(8πG) of quantum fields",
          "note": "GR parameter Λ is identified with ρ_vac from QFT; the identification creates the CC problem"
        },
        {
          "field_a_term": "Observed dark energy equation of state w ≈ −1 (SN Ia, CMB)",
          "field_b_term": "Vacuum energy equation of state p = −ρc² (negative pressure)",
          "note": "w = −1 is consistent with a pure cosmological constant / vacuum energy"
        },
        {
          "field_a_term": "QFT zero-point energy cutoff at Planck scale E_Pl",
          "field_b_term": "Ultraviolet divergence of vacuum energy (regulated)",
          "note": "Cutoff at E_Pl gives ρ_vac ~ E_Pl⁴/(ℏ³c⁵) ~ 10⁹⁴ kg/m³; requires 120-order cancellation"
        },
        {
          "field_a_term": "Supersymmetry (bosonic and fermionic zero-point energies cancel)",
          "field_b_term": "Proposed solution: bosonic +ℏω/2 cancels fermionic −ℏω/2",
          "note": "Unbroken SUSY would give Λ = 0 exactly; broken SUSY at TeV scale still leaves ~10⁶⁰ excess"
        }
      ],
      "references": [
        {
          "doi": "10.1103/RevModPhys.61.1",
          "note": "Weinberg (1989) Rev. Mod. Phys. – the cosmological constant problem; 120-order discrepancy"
        },
        {
          "doi": "10.1086/307221",
          "note": "Perlmutter et al. (1999) ApJ – measurements of Ω and Λ from 42 high-z supernovae; dark energy discovery"
        },
        {
          "doi": "10.1086/300499",
          "note": "Riess et al. (1998) AJ – observational evidence for supernovae acceleration; dark energy"
        },
        {
          "doi": "10.1146/annurev-astro-081811-125543",
          "note": "Weinberg et al. (2013) – observational probes of cosmic acceleration; dark energy review"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/astronomy-physics/b-dark-energy-vacuum-cosmological-constant.yaml"
    },
    {
      "id": "b-dark-matter-phase-transition-relics",
      "title": "Cosmological dark matter candidates are thermal or non-thermal relics of specific early-universe phase transitions — WIMPs from electroweak freeze-out, axions from the QCD phase transition at 150 MeV, and primordial black holes from density fluctuations — connecting galactic-scale astrophysical observations to statistical mechanics of symmetry breaking in the early universe.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The identity of dark matter is inseparable from the statistical physics of phase transitions in the early universe. Each major dark matter candidate is a relic of a specific transition:\nWIMPs (Weakly Interacting Massive Particles): produced thermally at the electroweak phase transition (T_EW ~ 130 G",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-dark-matter-qcd-axion-phase-relic"
      ],
      "communication_gap": "Astrophysicists measuring dark matter properties (galactic rotation curves, CMB power spectrum, gravitational lensing) rarely engage with the QCD phase transition literature; nuclear theorists computing lattice QCD thermodynamics rarely frame their results in terms of dark matter constraints. The WIMP miracle is well-known across communities, but the QCD-axion and QCD-PBH connections remain largely siloed in specialist subfields.\n",
      "translation_table": [
        {
          "field_a_term": "dark matter relic density Omega_DM h^2 ~ 0.12",
          "field_b_term": "freeze-out abundance from Boltzmann equation (non-equilibrium stat. mech.)",
          "note": "The WIMP miracle connects DM abundance to electroweak phase structure"
        },
        {
          "field_a_term": "WIMP mass ~ 100 GeV - 1 TeV",
          "field_b_term": "electroweak phase transition temperature T_EW ~ 130 GeV",
          "note": "Mass scale set by where thermal freeze-out produces correct relic density"
        },
        {
          "field_a_term": "axion mass m_a ~ 10^-5 eV (for f_a ~ 10^12 GeV)",
          "field_b_term": "QCD topological susceptibility chi_top(T)",
          "note": "Axion mass from instanton effects; directly measurable in lattice QCD"
        },
        {
          "field_a_term": "axion coherent oscillation (misalignment mechanism)",
          "field_b_term": "symmetry restoration / breaking at QCD crossover (T ~ 150 MeV)",
          "note": "Axion rolls to minimum when m_a(T) ~ H(T); this is a phase transition effect"
        },
        {
          "field_a_term": "primordial black hole mass ~ 1 solar mass",
          "field_b_term": "QCD phase transition at T ~ 150 MeV (horizon mass at that epoch)",
          "note": "Softening of equation of state at QCD crossover enhances PBH formation"
        },
        {
          "field_a_term": "LIGO binary black hole merger rates",
          "field_b_term": "PBH mass function from density fluctuations near QCD epoch",
          "note": "Unexpected LIGO events in 30-100 Msun range may be QCD-epoch PBHs"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.39.165",
          "note": "Lee & Weinberg (1977) - WIMP miracle and electroweak freeze-out"
        },
        {
          "doi": "10.1103/PhysRevLett.38.1440",
          "note": "Peccei & Quinn (1977) - CP conservation in the presence of pseudoparticles; axion motivation"
        },
        {
          "arxiv": "hep-ph/0601168",
          "note": "Kolb & Turner (1990) review; Griest & Seckel (1991) - detailed WIMP freeze-out"
        },
        {
          "arxiv": "1801.03509",
          "note": "Carr et al. (2018) - primordial black holes as dark matter; mass spectrum review"
        },
        {
          "doi": "10.1103/PhysRevD.96.043511",
          "note": "Byrnes et al. (2018) - QCD phase transition and PBH formation enhancement"
        },
        {
          "arxiv": "2012.09164",
          "note": "Borsanyi et al. (2016) - lattice QCD axion mass from QCD topological susceptibility"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/astronomy-physics/b-dark-matter-phase-transition-relics.yaml"
    },
    {
      "id": "b-gamma-ray-burst-jets-x-relativistic-hydrodynamics",
      "title": "Gamma-ray burst jets are relativistic outflows whose shocks, deceleration, and afterglow breaks are modeled with relativistic hydrodynamics and blast-wave theory bridging astronomy and plasma physics.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "GRBs involve collimated flows with Lorentz factors inferred from opacity arguments and afterglow onset times. Internal shocks and external forward shocks convert kinetic energy into non-thermal particles and radiation. Relativistic hydrodynamics supplies self-similar blast-wave solutions (Blandford–",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-jet-break-timescale-scales-with-entropy-and-opening-angle"
      ],
      "communication_gap": "GRB reviews target astro audiences; relativistic fluid textbooks target plasma physics. Cross-citation of numerical method papers is uneven.",
      "translation_table": [
        {
          "field_a_term": "Lorentz factor Γ",
          "field_b_term": "ultra-relativistic bulk motion of ejecta"
        },
        {
          "field_a_term": "relativistic shock jump conditions",
          "field_b_term": "particle heating and synchrotron emission"
        },
        {
          "field_a_term": "jet break in light curve",
          "field_b_term": "hydrodynamic transition + geometric beaming effects"
        }
      ],
      "references": [
        {
          "doi": "10.12942/lrr-2004-10",
          "note": "Piran (2004) — physics of gamma-ray bursts (Living Reviews)"
        },
        {
          "doi": "10.1086/177395",
          "note": "Meszaros & Rees (1997) — relativistic fireballs and internal/external shocks (classic bridge)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/astronomy-physics/b-gamma-ray-burst-jets-x-relativistic-hydrodynamics.yaml"
    },
    {
      "id": "b-neutron-star-matter-x-qcd-phases",
      "title": "Neutron star interiors probe cold ultra-dense matter whose equation of state ties nuclear theory and QCD-informed models to observable masses, radii, and tidal deformabilities.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Neutron stars support masses up to about two solar masses, constraining pressure versus density relations for matter above nuclear saturation. Microscopic models combine nucleonic matter, hyperons, or quark phases depending on density; QCD provides constraints on high-density limits and symmetry ene",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-tidal-deformability-tightens-symmetry-energy-slope"
      ],
      "communication_gap": "Nuclear theorists emphasize microscopic interactions; astronomers emphasize observational posteriors. EOS parameterizations are not always expressed in QCD-interpretable coordinates.",
      "translation_table": [
        {
          "field_a_term": "equation of state P(ρ)",
          "field_b_term": "mass–radius relation and maximum mass"
        },
        {
          "field_a_term": "symmetry energy slope L",
          "field_b_term": "crust thickness and tidal Love numbers"
        },
        {
          "field_a_term": "phase transition to quark matter",
          "field_b_term": "softening of EOS and twin stars / glitch phenomenology (speculative)"
        }
      ],
      "references": [
        {
          "doi": "10.1016/S0370-1573(00)00064-8",
          "note": "Lattimer & Prakash (2001) — neutron star structure and the nuclear EOS"
        },
        {
          "doi": "10.1103/PhysRevLett.120.261103",
          "note": "Abbott et al. (2018) — GW170817 constraints on tidal deformability (macro-micro bridge)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/astronomy-physics/b-neutron-star-matter-x-qcd-phases.yaml"
    },
    {
      "id": "b-red-sequence-x-galaxy-evolution",
      "title": "The galaxy red sequence — a tight correlation between color and luminosity for passive galaxies — encodes a long-timescale link between star-formation quenching, stellar population aging, and small scatter that bridges observational astronomy and stellar evolution physics.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Star-forming galaxies occupy a blue cloud in color–magnitude space; quenched galaxies fall on a redder locus (the red sequence) with comparatively small scatter. The tightness implies synchronized cessation of star formation and a narrow distribution of stellar population ages and metallicities at f",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-red-sequence-age-spreads-constrain-quenching-models"
      ],
      "communication_gap": "Observers discuss color–magnitude diagrams; theorists emphasize hydrodynamic feedback. Quantitative scatter metrics are not always shared as standardized likelihood targets across simulation teams.",
      "translation_table": [
        {
          "field_a_term": "rest-frame color (e.g., U−R)",
          "field_b_term": "effective temperature / metallicity mix of composite stellar population"
        },
        {
          "field_a_term": "scatter perpendicular to sequence",
          "field_b_term": "spread in star-formation histories at fixed mass"
        },
        {
          "field_a_term": "buttressing by environment",
          "field_b_term": "ram-pressure / strangulation timescales"
        }
      ],
      "references": [
        {
          "doi": "10.1086/323304",
          "note": "Strateva et al. (2001) — color magnitude distribution and red sequence characterization"
        },
        {
          "doi": "10.1086/375002",
          "note": "Bell et al. (2003) — nearly 2 Gyr of passive evolution and the red sequence (representative SFH bridge)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/astronomy-physics/b-red-sequence-x-galaxy-evolution.yaml"
    },
    {
      "id": "b-stellar-structure-thermodynamics",
      "title": "Stars are self-gravitating thermodynamic systems with negative heat capacity — a feature unique to long-range gravitational interactions (Lynden-Bell & Wood 1968) — causing them to heat up when they lose energy, and the Lane-Emden polytrope equations describe hydrostatic equilibrium as a competition between gravitational potential and thermal pressure whose stability is governed by the virial theorem.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "In normal thermodynamic systems, heat capacity C = dE/dT > 0: adding energy increases temperature. Lynden-Bell & Wood (1968, MNRAS 138:495) showed that self-gravitating systems have C < 0 — a fundamental consequence of the virial theorem — with profound observational consequences:\n1. Negative heat c",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-negative-heat-capacity-stellar-stability-criterion"
      ],
      "communication_gap": "Astrophysicists and statistical physicists rarely co-author. The negative heat capacity result (Lynden-Bell & Wood 1968) is well-known in stellar dynamics and globular cluster research, but largely unknown in statistical physics pedagogy, where it would be a striking illustration of the non-universality of thermodynamic stability. Conversely, the Tsallis non-extensive statistics literature (statistical physics) has not been widely tested by observational astronomers. The Lane-Emden solutions are standard stellar structure pedagogy but their connection to polytropic phase transitions in condensed matter physics is rarely discussed.\n",
      "translation_table": [
        {
          "field_a_term": "heat capacity C = dE/dT > 0 (normal thermodynamic system)",
          "field_b_term": "negative heat capacity C < 0 (self-gravitating stellar system)"
        },
        {
          "field_a_term": "virial theorem (mechanics / statistical physics)",
          "field_b_term": "hydrostatic equilibrium condition in stellar structure"
        },
        {
          "field_a_term": "thermodynamic phase transition",
          "field_b_term": "Jeans instability (gas cloud to star collapse)"
        },
        {
          "field_a_term": "spinodal decomposition (negative compressibility)",
          "field_b_term": "gravothermal catastrophe (runaway core collapse)"
        },
        {
          "field_a_term": "polytropic index n (equation of state parameter)",
          "field_b_term": "Lane-Emden solution family (n=3 Chandrasekhar stability limit)"
        },
        {
          "field_a_term": "Boltzmann-Gibbs statistical mechanics (short-range interactions)",
          "field_b_term": "Lynden-Bell statistics (long-range gravitational interactions)"
        },
        {
          "field_a_term": "Fermi-Dirac / Bose-Einstein distributions",
          "field_b_term": "Lynden-Bell distribution (collisionless self-gravitating systems)"
        }
      ],
      "references": [
        {
          "doi": "10.1093/mnras/138.4.495",
          "note": "Lynden-Bell & Wood (1968) The gravo-thermal catastrophe in isothermal spheres and the onset of red-giant structure for stellar systems, MNRAS 138:495"
        },
        {
          "doi": "10.1093/mnras/95.3.207",
          "note": "Chandrasekhar (1935) The highly collapsed configurations of a stellar mass (second paper), MNRAS 95:207"
        },
        {
          "url": "https://www.jstor.org/stable/10.1086/145502",
          "note": "Lane (1870) On the theoretical temperature of the Sun — Am J Sci, original polytrope calculation"
        },
        {
          "url": "https://archive.org/details/gaskugeln00emde",
          "note": "Emden (1907) Gaskugeln — Teubner, Lane-Emden equation systematic treatment"
        },
        {
          "doi": "10.1093/mnras/136.1.101",
          "note": "Lynden-Bell (1967) Statistical mechanics of violent relaxation in stellar systems, MNRAS 136:101"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/astronomy-physics/b-stellar-structure-thermodynamics.yaml"
    },
    {
      "id": "b-tidal-locking-spin-orbit-resonance",
      "title": "Tidal locking is a dissipative dynamical systems problem where tidal torques drive a satellite toward spin-orbit resonance attractors, with the 1:1 resonance (synchronous rotation) being the stable fixed point for low eccentricity orbits — explained by the same dissipative mechanics that governs coupled oscillator synchronization in physics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The Moon always shows the same face to Earth because tidal forces from Earth dissipate energy in the Moon's interior until its rotation period equals its orbital period (1:1 spin-orbit resonance). Dynamical systems theory provides the framework: the tidally averaged torque creates a potential-like f",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "",
      "translation_table": [
        {
          "field_a_term": "stable fixed point of tidally averaged spin equation",
          "field_b_term": "synchronous rotation (1:1 or other spin-orbit resonance)",
          "note": "The attractor of the dissipative spin dynamics — the resonance the body evolves toward"
        },
        {
          "field_a_term": "tidal Q factor (dissipation rate)",
          "field_b_term": "rate of approach to synchronous rotation",
          "note": "Low Q (high dissipation) = fast locking; rocky planets lock in ~10^8-10^9 yr"
        },
        {
          "field_a_term": "resonance width (related to eccentricity)",
          "field_b_term": "capture probability into resonance",
          "note": "High eccentricity opens multiple stable resonance attractors (3:2, 2:1, etc.)"
        },
        {
          "field_a_term": "Arnold tongue (phase-locking region in parameter space)",
          "field_b_term": "tidal locking parameter space (Q vs. eccentricity diagram)",
          "note": "The region in parameter space where a given resonance is stable"
        }
      ],
      "references": [
        {
          "doi": "10.1051/0004-6361:20011007",
          "note": "Correia & Laskar (2001) — the four final rotation states of Venus"
        },
        {
          "doi": "10.1006/icar.1996.0166",
          "note": "Goldreich & Soter (1966) — Q in the solar system; tidal dissipation and spin-orbit coupling"
        },
        {
          "doi": "10.1093/mnras/195.3.583",
          "note": "Hut (1981) — tidal evolution in close binary systems"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/astronomy-physics/b-tidal-locking-spin-orbit-resonance.yaml"
    },
    {
      "id": "b-virial-theorem-x-molecular-cloud-cluster-equilibrium",
      "title": "The virial theorem balances kinetic and gravitational potential energy in self-gravitating systems — central to molecular-cloud mass estimates from line widths and to galaxy-cluster masses inferred from galaxy velocities and X-ray gas pressure — with explicit caveats when turbulence, magnetic fields, or departures from spherical equilibrium break simple 2K+V≈0 scaling assumptions.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Giant molecular clouds are routinely characterized via virial estimates linking internal velocity dispersion σ and radius R to mass M ~ k σ² R / G when approximate equilibrium holds; galaxy clusters likewise combine galaxy velocity dispersions and hot intracluster medium hydrostatic X-ray modeling u",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-virial-multicomponent-consistency-reduces-cluster-mass-bias"
      ],
      "communication_gap": "Star-formation seminars emphasize linewidth-based virial masses while cluster cosmology seminars emphasize Navarro–Frenk–White halos and hydrostatic X-ray biases — students rarely see a unified checklist of virial-theorem breakdown modes across scales.\n",
      "translation_table": [
        {
          "field_a_term": "virial parameter α_vir ≈ 5σ² R / (G M) for clouds",
          "field_b_term": "cluster mass estimators combining σ_v and gas profiles",
          "note": "Same balance theme — distinct systematic-error budgets."
        },
        {
          "field_a_term": "turbulent + magnetic pressure support in GMCs",
          "field_b_term": "nonthermal cluster ICM turbulence / cosmic-ray pressure",
          "note": "Extra terms violate naive virial bookkeeping unless modeled."
        },
        {
          "field_a_term": "cloud ensemble scaling relations (Larson-like laws)",
          "field_b_term": "cluster scaling laws (e.g., Y_X–M, L–T relations)",
          "note": "Emergent correlations across hierarchies — not literal equality."
        }
      ],
      "references": [
        {
          "doi": "10.1146/annurev.astro.45.051806.110602",
          "note": "McKee & Ostriker (2007) — theory of star formation including molecular cloud phase structure and virial framing (Annu Rev Astron Astrophys)."
        },
        {
          "doi": "10.1146/annurev.astro.25.1.23",
          "note": "Shu (1987) — star formation in molecular clouds (Annu Rev Astron Astrophys; virial / equilibrium language in GMC discussion)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/astronomy-physics/b-virial-theorem-x-molecular-cloud-cluster-equilibrium.yaml"
    },
    {
      "id": "b-stellar-nucleosynthesis-periodic-table",
      "title": "All chemical elements heavier than hydrogen and helium were forged in stars — the periodic table is a record of stellar evolution history, quantitatively explained by nuclear physics reactions in successive stellar environments.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Burbidge, Burbidge, Fowler & Hoyle (B²FH, 1957) paper established that stellar nucleosynthesis accounts for the cosmic abundance of all elements: pp-chain and CNO cycle produce helium in main-sequence stars; triple-alpha process fuses three helium nuclei into carbon at 10^8 K in red giants; alph",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-r-process-cgm-metal-enrichment"
      ],
      "communication_gap": "Nuclear physicists, astrophysicists, and chemists measure abundances and model reactions but publish in separate communities. The quantitative connection between nuclear cross- sections measured at particle accelerators (FRIB, GSI) and observed stellar abundances requires both communities to engage — this is improving but cross-disciplinary papers remain rare.\n",
      "translation_table": [
        {
          "field_a_term": "stellar interior temperature / density",
          "field_b_term": "nuclear reaction rates (Gamow window)",
          "note": "Tunneling through Coulomb barrier at stellar temperature selects which reactions occur"
        },
        {
          "field_a_term": "neutron capture cross-section",
          "field_b_term": "elemental abundance ratio",
          "note": "Large σ(n,γ) → low abundance at that mass number; abundance pattern directly encodes nuclear physics"
        },
        {
          "field_a_term": "r-process vs s-process path",
          "field_b_term": "neutron-rich vs stable nuclide synthesis",
          "note": "r-process produces elements far from stability; s-process follows the valley of stability"
        },
        {
          "field_a_term": "neutron star merger ejecta (kilonova)",
          "field_b_term": "r-process chemical yield per event",
          "note": "GW170817 confirmed ~0.05 solar masses of lanthanide-rich r-process ejecta"
        }
      ],
      "references": [
        {
          "doi": "10.1103/RevModPhys.29.547",
          "note": "Burbidge, E.M. et al. (1957). Synthesis of the elements in stars. Rev Mod Phys 29:547."
        },
        {
          "doi": "10.1103/PhysRev.55.434",
          "note": "Bethe, H. (1939). Energy production in stars. Phys Rev 55:434."
        },
        {
          "doi": "10.3847/2041-8213/aa920c",
          "note": "Abbott, B.P. et al. (2017). Multi-messenger observations of a binary neutron star merger. ApJ Lett 848:L12."
        },
        {
          "doi": "10.1086/127051",
          "note": "Cameron, A.G.W. (1957). Nuclear reactions in stars and nucleogenesis. Publ Astron Soc Pac 69:201."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/astrophysics-chemistry/b-stellar-nucleosynthesis-periodic-table.yaml"
    },
    {
      "id": "b-accretion-disk-mhd-turbulence",
      "title": "Accretion disk angular momentum transport is governed by the magnetorotational instability (MRI) — a linear MHD instability in differentially rotating magnetized plasmas that drives turbulence and mediates the anomalous viscosity α required to explain observed accretion rates.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Accretion disks around compact objects (black holes, neutron stars, white dwarfs, young stellar objects) must transport angular momentum outward to allow mass to flow inward. Molecular viscosity is 13 orders of magnitude too small to explain observed accretion rates. Shakura & Sunyaev (1973) paramet",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-mri-turbulence-alpha-prediction"
      ],
      "communication_gap": "Astrophysicists work with global disk simulations using general relativistic MHD (GRMHD), while fluid dynamicists develop turbulence theory for simpler Cartesian shear flows. The parameter regimes accessible in laboratory experiments (Pm < 1) differ from astrophysical disks (Pm >> 1 in hot plasmas). The communities publish in the Astrophysical Journal vs. Journal of Fluid Mechanics, with limited overlap. The connection between the Shakura-Sunyaev α model and MRI turbulence is widely understood in astrophysics but rarely taught in fluid dynamics courses.\n",
      "translation_table": [
        {
          "field_a_term": "differential rotation profile Ω(r) ∝ r^{-3/2} (Keplerian)",
          "field_b_term": "shear flow profile in rotating Couette-Taylor apparatus",
          "note": "The MRI operates identically in the astrophysical and laboratory settings"
        },
        {
          "field_a_term": "weak vertical magnetic field B_z",
          "field_b_term": "applied axial magnetic field in liquid metal experiments",
          "note": "Sub-thermal field required; v_A = B/√(4πρ) < c_s"
        },
        {
          "field_a_term": "MRI growth rate ~ Ω (fastest mode)",
          "field_b_term": "instability growth timescale in shear-flow MHD",
          "note": "Fastest growing wavelength λ_MRI = 2π v_A/Ω"
        },
        {
          "field_a_term": "angular momentum transport by Maxwell stress ⟨B_r B_φ⟩/4π",
          "field_b_term": "turbulent momentum flux in MHD shear turbulence",
          "note": "The α parameter α ≈ ⟨B_r B_φ + ρ v_r v_φ⟩/P"
        },
        {
          "field_a_term": "anomalous viscosity α (Shakura-Sunyaev)",
          "field_b_term": "turbulent eddy viscosity in fluid dynamics",
          "note": "Both parametrize sub-grid momentum transport"
        },
        {
          "field_a_term": "accretion timescale t_acc = r²/ν = r²/(α c_s H)",
          "field_b_term": "viscous diffusion timescale in turbulent fluid"
        },
        {
          "field_a_term": "dead zones (low-ionization regions with suppressed MRI)",
          "field_b_term": "laminar regions between turbulent zones",
          "note": "Relevant to planet formation in protoplanetary disks"
        }
      ],
      "references": [
        {
          "doi": "10.1086/170270",
          "note": "Balbus & Hawley (1991) ApJ 376:214 — definitive MRI paper for accretion disks"
        },
        {
          "note": "Shakura & Sunyaev (1973) A&A 24:337 — α-disk model"
        },
        {
          "note": "Lynden-Bell & Pringle (1974) MNRAS 168:603 — viscous disk evolution"
        },
        {
          "doi": "10.1051/0004-6361:20077537",
          "note": "Fromang & Papaloizou (2007) A&A 476:1123 — MRI turbulence and angular momentum transport"
        },
        {
          "doi": "10.1103/PhysRevLett.97.184503",
          "note": "Sisan et al. (2004/2006) — laboratory MRI in liquid metal Couette flow"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/astrophysics-fluid-dynamics/b-accretion-disk-mhd-turbulence.yaml"
    },
    {
      "id": "b-solar-wind-alfven-wave-turbulence",
      "title": "The solar wind is a magnetohydrodynamic turbulent medium dominated by Alfvén wave fluctuations propagating outward from the corona, whose spectral cascade from large injection scales to dissipation at ion inertial lengths follows Kolmogorov-like scaling modified by anisotropy and Alfvénic imbalance\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Solar wind turbulence is described by MHD as counter-propagating Alfvén wave packets interacting to drive a spectral energy cascade: outward-propagating Elsässer variables z+ (dominant) and inward-propagating z- interact nonlinearly with rate proportional to their cross-helicity imbalance, transferr",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-alfven-turbulence-stochastic-ion-heating"
      ],
      "communication_gap": "Heliosphericists analyse solar wind spacecraft data (Parker Solar Probe, Wind) while fluid mechanicists study turbulence in laboratory settings; the MHD Alfvénic turbulence framework is well-established theoretically but the connection to classical Kolmogorov phenomenology is not always made explicit for fluid mechanics audiences.\n",
      "translation_table": [
        {
          "field_a_term": "solar wind velocity fluctuations (astrophysics)",
          "field_b_term": "Elsässer variable z+ = v + b/sqrt(4πρ) for outward Alfvén wave packets (plasma physics)",
          "note": "Elsässer formulation separates counter-propagating Alfvén modes; z+ dominates in fast wind"
        },
        {
          "field_a_term": "solar wind magnetic field fluctuations (astrophysics)",
          "field_b_term": "normalized fluctuating magnetic field b = δB/sqrt(4πρ) in Alfvén units (plasma physics)",
          "note": "Alfvénic fluctuations have |δv| ≈ |δB|/sqrt(4πρ), observed in fast wind with cross-helicity σ_c ~ 0.9"
        },
        {
          "field_a_term": "power spectral density of solar wind fluctuations (astrophysics)",
          "field_b_term": "turbulent energy spectrum E(k) scaling as k^-5/3 (fluid mechanics)",
          "note": "Kolmogorov scaling observed at inertial range scales 10^4 - 10^6 km in the solar wind"
        },
        {
          "field_a_term": "solar corona heating problem (astrophysics)",
          "field_b_term": "Alfvén wave energy dissipation at ion inertial length / kinetic dissipation scales (plasma physics)",
          "note": "Alfvén wave turbulent cascade delivers energy to ion gyroradius scale where kinetic damping occurs"
        }
      ],
      "references": [
        {
          "doi": "10.1086/148138",
          "note": "Coleman (1968) - turbulence, viscosity, and dissipation in the solar-wind plasma"
        },
        {
          "doi": "10.3847/2041-8213/ab0f43",
          "note": "Chen et al. (2020) - evidence for a scale-dependent energy cascade in solar wind turbulence"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/astrophysics-fluid-dynamics/b-solar-wind-alfven-wave-turbulence.yaml"
    },
    {
      "id": "b-black-hole-entropy-holographic",
      "title": "The Bekenstein-Hawking entropy S = A/4 (area, not volume) of a black hole implies the holographic principle — that the maximum information content of any 3D region is bounded by its 2D boundary area, making information theory and spacetime geometry equivalent at the Planck scale.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The discovery that black holes have entropy proportional to their surface area — not volume — is the most profound known connection between spacetime geometry and information theory.\n1. Bekenstein-Hawking entropy. Bekenstein (1973) argued that the second law\n   of thermodynamics requires black holes",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-spacetime-emerges-from-entanglement"
      ],
      "communication_gap": "Information theorists and astrophysicists/string theorists publish in disjoint venues (IEEE Transactions on Information Theory vs. Physical Review Letters, Journal of High Energy Physics). The mathematical tools (von Neumann entropy, quantum information, tensor networks) are being transferred between fields, but practitioners in each field often lack fluency in the other's formalism. The AdS/CFT community is increasingly importing quantum information concepts; the quantum information community is increasingly aware of holography.\n",
      "translation_table": [
        {
          "field_a_term": "Shannon entropy H (information theory)",
          "field_b_term": "Bekenstein-Hawking entropy S_BH (black hole physics)",
          "note": "Both measure missing information; S_BH = k_B ln(microstates)"
        },
        {
          "field_a_term": "information capacity of a channel (Shannon)",
          "field_b_term": "maximum information in a spacetime region (holographic bound)",
          "note": "Holographic bound A/4l²_P bits is the physical channel capacity limit"
        },
        {
          "field_a_term": "data compression limit (Shannon source coding theorem)",
          "field_b_term": "Bekenstein bound — minimum volume to store given information",
          "note": "Both set fundamental limits on information density"
        },
        {
          "field_a_term": "error-correcting code (information theory)",
          "field_b_term": "quantum error correction in AdS/CFT (bulk reconstruction)",
          "note": "Holographic entanglement is a quantum error-correcting code (Almheiri 2015)"
        },
        {
          "field_a_term": "mutual information I(A:B) (information theory)",
          "field_b_term": "Ryu-Takayanagi formula: I(A:B) = 2S_γ where γ is minimal surface",
          "note": "Entanglement entropy in CFT = minimal surface area in AdS geometry"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevD.7.2333",
          "note": "Bekenstein (1973) Phys Rev D 7:2333 — black holes and entropy"
        },
        {
          "doi": "10.1007/BF01608497",
          "note": "Hawking (1975) Commun Math Phys 43:199 — particle creation by black holes"
        },
        {
          "arxiv": "gr-qc/9310026",
          "note": "'t Hooft (1993) — dimensional reduction in quantum gravity"
        },
        {
          "doi": "10.1063/1.531249",
          "note": "Susskind (1995) J Math Phys 36:6377 — the world as a hologram"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/astrophysics-information-theory/b-black-hole-entropy-holographic.yaml"
    },
    {
      "id": "b-general-relativity-differential-geometry",
      "title": "General relativity is differential geometry applied to physics — spacetime curvature is the Riemann curvature tensor and gravity emerges from geodesic deviation",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Einstein's field equations Gμν + Λgμν = (8πG/c⁴)Tμν express that the curvature of spacetime (Einstein tensor Gμν, derived from the Riemann curvature tensor Rμναβ) equals the stress-energy content of matter (Tμν). Spacetime is a pseudo-Riemannian 4-manifold with metric tensor gμν; gravity is not a fo",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-gr-gauge-theory-fiber-bundle-unification"
      ],
      "communication_gap": "General relativity requires Riemannian geometry typically taught in graduate mathematics programs, while physics curricula often present GR through tensor index notation without the underlying differential geometry formalism. Mathematicians who generalize Riemannian geometry (sub-Riemannian, Finsler, non-commutative geometry) rarely engage with astrophysical applications.\n",
      "translation_table": [
        {
          "field_a_term": "metric tensor g_mu_nu",
          "field_b_term": "gravitational potential (generalizes Newtonian phi)",
          "note": "In weak-field limit, g_00 = -(1 + 2phi/c^2), recovering Newtonian gravity"
        },
        {
          "field_a_term": "geodesic equation (d^2x^mu/dtau^2 + Gamma^mu_alphabeta dx^alpha/dtau dx^beta/dtau = 0)",
          "field_b_term": "equation of motion of a free-falling body",
          "note": "Christoffel symbols Gamma encode the gravitational field; geodesic = free fall"
        },
        {
          "field_a_term": "Riemann curvature tensor R^alpha_betagammadelta",
          "field_b_term": "tidal gravitational forces (geodesic deviation equation)",
          "note": "Riemann tensor measures how nearby geodesics accelerate toward/away from each other"
        },
        {
          "field_a_term": "Bianchi identity (contracted: nabla_mu G^mu_nu = 0)",
          "field_b_term": "energy-momentum conservation nabla_mu T^mu_nu = 0",
          "note": "Mathematical identity in differential geometry encodes physical conservation law"
        },
        {
          "field_a_term": "Einstein-Hilbert action S = integral (R - 2Lambda) sqrt(-g) d^4x",
          "field_b_term": "variational principle from which Einstein's equations are derived",
          "note": "Riemann scalar curvature R is the Lagrangian density for gravity"
        }
      ],
      "references": [
        {
          "note": "Einstein, A. (1915). Die Feldgleichungen der Gravitation. Sitzungsber Preuss Akad Wiss."
        },
        {
          "note": "Riemann, B. (1854). Über die Hypothesen, welche der Geometrie zu Grunde liegen. Göttingen."
        },
        {
          "note": "Misner, C.W., Thorne, K.S. & Wheeler, J.A. (1973). Gravitation. W.H. Freeman."
        },
        {
          "note": "Wald, R.M. (1984). General Relativity. University of Chicago Press."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/astrophysics-mathematics/b-general-relativity-differential-geometry.yaml"
    },
    {
      "id": "b-gravitational-lensing-optical-caustics",
      "title": "Gravitational lensing by galaxy clusters and individual galaxies produces arc patterns and caustic surfaces that are mathematically identical to optical caustics described by catastrophe theory: the Einstein ring, fold, and cusp arcs correspond to the fold, cusp, and swallowtail catastrophes of Thom's classification, unifying astrophysical lensing with the geometric optics of wavefront singularities",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The lensing map from source plane to image plane is a smooth map between two-dimensional planes, and its singularities form the critical curves in the image plane and caustic curves in the source plane; the local structure of these singularities is classified by catastrophe theory (Thom 1975) — fold",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-gravitational-lensing-caustic-classification-test"
      ],
      "communication_gap": "Astrophysicists use numerical ray-tracing to compute lens maps while catastrophe theorists work with abstract smooth maps; the connection is textbook knowledge in gravitational lens theory but analytical catastrophe-theory templates are not routinely used in operational microlensing survey pipelines.",
      "translation_table": [
        {
          "field_a_term": "Einstein ring (perfect alignment lensing) (astrophysics)",
          "field_b_term": "circular caustic / degenerate critical point (catastrophe theory)",
          "note": "Perfect axial symmetry gives a degenerate fold forming a complete ring; perturbation breaks it into fold arcs"
        },
        {
          "field_a_term": "fold arc in gravitational lensing (astrophysics)",
          "field_b_term": "fold catastrophe A_2 singularity of smooth map (mathematics)",
          "note": "Near a fold, two images merge and the magnification scales as |delta|^{-1/2}; identical to optical fold caustic"
        },
        {
          "field_a_term": "cusp arc or pair of merging arcs (astrophysics)",
          "field_b_term": "cusp catastrophe A_3 singularity (mathematics)",
          "note": "Three images merge at a cusp; magnification diverges faster than at a fold"
        },
        {
          "field_a_term": "caustic crossing in microlensing light curve (astrophysics)",
          "field_b_term": "jump discontinuity in image number at a fold caustic (catastrophe theory)",
          "note": "Both show the same sharp brightening and exact |t-t_cross|^{-1/2} light curve profile at caustic crossing"
        }
      ],
      "references": [
        {
          "doi": "10.1007/978-3-540-97648-9",
          "note": "Schneider et al. (1992) - gravitational lenses (comprehensive treatment of caustics in lensing)"
        },
        {
          "doi": "10.1093/mnras/218.4.575",
          "note": "Blandford & Narayan (1986) - fermat's principle, caustics, and the caustic crossing in lensing (catastrophe connection)"
        },
        {
          "doi": "10.1007/BF01404567",
          "note": "Thom (1975) - structural stability and morphogenesis (catastrophe theory classification)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/astrophysics-mathematics/b-gravitational-lensing-optical-caustics.yaml"
    },
    {
      "id": "b-stellar-nucleosynthesis-network-flow",
      "title": "Stellar nucleosynthesis proceeds through a reaction network of hundreds of isotopes connected by nuclear reactions, and the relative abundances of elements produced can be computed by solving the same maximum-flow and steady-state flux equations used in metabolic network analysis and chemical engineering yield problems",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The abundance evolution of nuclides in a stellar burning zone is governed by a coupled ODE network dY_i/dt = sum_j lambda_{ji} Y_j - Y_i sum_k lambda_{ik}, where Y_i are molar abundances and lambda are reaction rates from nuclear physics; this is formally identical to the flux-balance analysis (FBA)",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-r-process-neutron-star-merger-dominant"
      ],
      "communication_gap": "Nuclear astrophysicists build large custom ODE solvers for reaction networks while network scientists and systems biologists use general-purpose flux analysis software; cross-pollination is limited by the domain-specific notation (Gamow window, Q-values, waiting points) not familiar to network theorists.",
      "translation_table": [
        {
          "field_a_term": "nuclear reaction rate lambda_{ij} (Gamow factor * Maxwell-Boltzmann integral) (nuclear physics)",
          "field_b_term": "edge flux in a directed flow network (network science)",
          "note": "Each nuclear reaction is an edge; its capacity is the temperature- and density-dependent rate coefficient"
        },
        {
          "field_a_term": "isotope abundance Y_i (astrophysics)",
          "field_b_term": "node concentration in metabolic network steady state (network science)",
          "note": "Abundance evolves by conservation: production minus destruction fluxes, identical to metabolic FBA node balance"
        },
        {
          "field_a_term": "waiting point isotope at neutron magic number (astrophysics)",
          "field_b_term": "rate-limiting bottleneck node in flow network (network science)",
          "note": "Nuclides with long beta-decay half-lives are bottlenecks that determine the r-process path and abundance peaks"
        },
        {
          "field_a_term": "nuclear statistical equilibrium (NSE) at high T (astrophysics)",
          "field_b_term": "thermodynamic equilibrium / maximum entropy steady state (network science)",
          "note": "NSE is the maximum entropy distribution of nuclides given conserved proton and neutron numbers, analogous to detailed balance"
        }
      ],
      "references": [
        {
          "doi": "10.1146/annurev.astro.46.060407.145207",
          "note": "Arnould et al. (2007) - the r-process of stellar nucleosynthesis: astrophysics and nuclear physics achievements and mysteries"
        },
        {
          "doi": "10.1086/309265",
          "note": "Burbidge et al. (1957) - synthesis of the elements in stars (B2FH: foundational nucleosynthesis network paper)"
        },
        {
          "doi": "10.1103/RevModPhys.93.015002",
          "note": "Cowan et al. (2021) - origin of the heaviest elements: the rapid neutron-capture process (r-process review)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/astrophysics-nuclear-physics/b-stellar-nucleosynthesis-network-flow.yaml"
    },
    {
      "id": "b-cosmological-inflation-slow-roll-scalar",
      "title": "Cosmological inflation is driven by a slowly rolling scalar field (inflaton) in a de Sitter-like background, generating a nearly scale-invariant power spectrum of primordial density perturbations that directly tests quantum field theory in curved spacetime\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Inflation occurs when a scalar field φ rolls slowly (φ̈ ≪ 3Hφ̇, V ≫ φ̇²/2) on a nearly flat potential V(φ), maintaining approximate de Sitter expansion (H² ≈ V/3M_pl²); quantum fluctuations of φ during inflation are stretched to superhorizon scales and frozen as classical perturbations, generating a",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-slow-roll-spectral-tilt-potential-discrimination"
      ],
      "communication_gap": "Cosmologists interpret CMB observations as constraints on inflaton models while quantum field theorists study vacuum structure in curved spacetime; the translation between potential V(φ) shape and observable (n_s, r) constraints is well-known in inflationary cosmology but requires QFT in curved spacetime expertise rarely possessed by astronomers.\n",
      "translation_table": [
        {
          "field_a_term": "inflaton scalar field φ(t) (cosmology)",
          "field_b_term": "scalar quantum field in de Sitter background spacetime (quantum field theory)",
          "note": "The inflaton is a quantum field whose vacuum fluctuations seed density perturbations; classically it evolves via the Klein-Gordon equation"
        },
        {
          "field_a_term": "slow-roll condition ε = -Ḣ/H² ≪ 1 (cosmology)",
          "field_b_term": "flatness condition on scalar field potential V(φ): (V'/V)² ≪ 16π G (quantum field theory)",
          "note": "Slow roll requires the potential to be nearly flat; ε measures deviation from exact de Sitter (ε = 0)"
        },
        {
          "field_a_term": "CMB scalar spectral index n_s ≈ 0.965 (cosmology)",
          "field_b_term": "quantum field theory prediction from slow-roll parameters: n_s = 1 - 6ε + 2η (quantum field theory)",
          "note": "Planck measurement n_s = 0.965 ± 0.004 constrains the inflaton potential shape via slow-roll parameters"
        },
        {
          "field_a_term": "tensor-to-scalar ratio r (cosmology)",
          "field_b_term": "primordial gravitational wave amplitude from quantum gravity fluctuations during inflation (quantum field theory)",
          "note": "r = 16ε constrains inflationary energy scale; current bound r < 0.036 rules out many large-field models"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevD.23.347",
          "note": "Guth (1981) - inflationary universe: a possible solution to the horizon and flatness problems"
        },
        {
          "doi": "10.1051/0004-6361/201833887",
          "note": "Planck Collaboration (2019) - Planck 2018 results X: constraints on inflation"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/astrophysics-physics/b-cosmological-inflation-slow-roll-scalar.yaml"
    },
    {
      "id": "b-neutron-star-nuclear-eos",
      "title": "Neutron star mass-radius relationships encode the dense matter equation of state, connecting neutron star astrophysics to nuclear symmetry energy and constraining the pressure-density relationship of matter at 2-8 times nuclear saturation density",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The neutron star mass-radius curve M(R) is a one-to-one map from the equation of state P(rho), determined by integrating the Tolman-Oppenheimer-Volkoff (TOV) equations; NICER X-ray timing measurements of M and R constrain P(rho) at 2-5 times nuclear saturation density, providing the highest-density ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Nuclear physicists measure symmetry energy in heavy-ion collisions and finite nuclei while astrophysicists measure M-R from pulsars; the TOV mapping connecting nuclear lab measurements to neutron star observables requires translating between nuclear and relativistic frameworks rarely taught in either community.",
      "translation_table": [
        {
          "field_a_term": "neutron star radius R at 1.4 solar masses",
          "field_b_term": "pressure of beta-equilibrated nuclear matter at rho ~ 2*rho_0",
          "note": "R_1.4 ~ 11-13 km from NICER; each km in R corresponds to ~10 MeV/fm^3 difference in pressure at 2*rho_0"
        },
        {
          "field_a_term": "maximum neutron star mass M_max",
          "field_b_term": "stiffness of EOS at rho > 4*rho_0",
          "note": "M_max ~ 2 M_sun (PSR J0952-0607) requires stiff EOS at high density; rules out pure quark matter or kaon condensate models"
        },
        {
          "field_a_term": "nuclear symmetry energy slope L at saturation density",
          "field_b_term": "neutron star crust thickness and pressure-radius correlation",
          "note": "L = 3*rho_0 * dS/drho at rho_0; larger L means stiffer symmetry energy and larger radius"
        },
        {
          "field_a_term": "gravitational wave tidal deformability Lambda from NS merger",
          "field_b_term": "quadrupole polarizability of nuclear matter",
          "note": "LIGO/Virgo GW170817 tidal deformability Lambda_1.4 < 800 constrains EOS at 1-2*rho_0"
        }
      ],
      "references": [
        {
          "doi": "10.3847/2041-8213/ab0c2f",
          "note": "Riley et al. (2019) ApJL - NICER measurement of PSR J0030+0451 mass and radius"
        },
        {
          "doi": "10.3847/2041-8213/aa9035",
          "note": "Abbott et al. (2017) ApJL - GW170817 tidal deformability and EOS constraints"
        },
        {
          "doi": "10.1103/PhysRevLett.120.172703",
          "note": "Annala et al. (2018) PRL - neutron star mergers and the quark-hadron crossover from EOS constraints"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/astrophysics-physics/b-neutron-star-nuclear-eos.yaml"
    },
    {
      "id": "b-neutron-star-nuclear-matter",
      "title": "Neutron star interiors at 2-8× nuclear saturation density are the densest observable matter in the universe — the equation of state P(ρ) bridges nuclear physics (strong force) to astrophysics (compact object structure) through the Tolman-Oppenheimer-Volkoff equation, constrained by LIGO/Virgo tidal deformability measurements.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "NEUTRON STAR INTERIOR PHYSICS: Nuclear saturation density: ρ₀ = 2.3×10¹⁴ g/cm³. Neutron star core: ρ = 2-8ρ₀ — accessible to no terrestrial experiment but observable via neutron star structure.\nTOLMAN-OPPENHEIMER-VOLKOFF EQUATION (TOV):\n  dP/dr = -G(ρ + P/c²)(M + 4πr³P/c²) / [r²(1 - 2GM/rc²)]\n\nThis ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-neutron-star-radius-eos-constraint"
      ],
      "communication_gap": "Nuclear physicists and astrophysicists work on different aspects of this problem. Nuclear theory groups computing EOS do not always have expertise in GR structure equations; relativistic astrophysicists computing TOV solutions may not deeply know the nuclear physics inputs. The joint analysis required for GW inference bridged this gap post-GW170817.\n",
      "translation_table": [
        {
          "field_a_term": "equation of state P(rho) (nuclear physics)",
          "field_b_term": "mass-radius relation M(R) (astrophysics)",
          "note": "TOV equation is the mapping — same EOS produces unique M-R curve; different nuclear models produce measurably different R at given M"
        },
        {
          "field_a_term": "nuclear saturation density rho_0 (nuclear physics)",
          "field_b_term": "neutron star central density reference scale",
          "note": "rho_0 = 2.3×10^14 g/cm^3; neutron star cores reach 2-8 rho_0 — regime inaccessible to Earth labs"
        },
        {
          "field_a_term": "tidal Love number k2 (GR / astrophysics)",
          "field_b_term": "nuclear compressibility K (nuclear physics)",
          "note": "Stiffer EOS (larger K) → larger R → larger Λ; GW tidal measurement constrains nuclear compressibility"
        },
        {
          "field_a_term": "quark matter EOS (particle physics / QCD)",
          "field_b_term": "mass gap in M-R curve (astrophysics)",
          "note": "A QCD phase transition at ~3rho_0 would produce a kink in M-R relation detectable with enough neutron star measurements"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRev.55.374",
          "note": "Oppenheimer & Volkoff (1939) Phys Rev 55:374 — TOV equation"
        },
        {
          "doi": "10.1086/319702",
          "note": "Lattimer & Prakash (2001) ApJ 550:426 — EOS review"
        },
        {
          "doi": "10.1103/PhysRevLett.121.161101",
          "note": "Abbott et al. (2018) Phys Rev Lett 121:161101 — GW170817 tidal constraint"
        },
        {
          "doi": "10.1088/1361-6633/aaae14",
          "note": "Baym et al. (2018) Rep Prog Phys 81:056902 — dense matter review"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/astrophysics-physics/b-neutron-star-nuclear-matter.yaml"
    },
    {
      "id": "b-primordial-nucleosynthesis-reaction-networks",
      "title": "Primordial nucleosynthesis is a nuclear reaction network ODE: Big Bang nucleosynthesis (BBN) computes the abundances of H, D, He-3, He-4, and Li-7 from baryon-to-photon ratio η using the same coupled ODE formalism as stellar nucleosynthesis",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Big Bang nucleosynthesis (BBN) traces abundances X_i(t) of ~26 nuclides from T~10 MeV (t~10⁻² s) to T~0.01 MeV (t~10³ s) using a coupled ODE system: dX_i/dt = Σ_j (production rates) - Σ_j (destruction rates), where each rate r_{ij} = ⟨σv⟩_{ij} · n_j · n_i depends on nuclear cross sections ⟨σv⟩ measu",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-primordial-nucleosynthesis-reaction-networks"
      ],
      "communication_gap": "Cosmologists who discuss BBN often present it as solved, without engaging with the nuclear physics uncertainty propagation through the ODE network. Nuclear physicists who measure cross sections at LUNA are not always aware of how their uncertainties propagate to primordial abundance predictions. The ODE formalism used in BBN codes (PArthENoPE, AlterBBN) is identical to that in stellar evolution codes but the communities rarely exchange methods.\n",
      "translation_table": [
        {
          "field_a_term": "primordial He-4 mass fraction Y_p",
          "field_b_term": "solution to the BBN ODE network at freeze-out T ~ 0.07 MeV",
          "note": "Y_p~0.247; measured in metal-poor HII regions; constrains η and N_eff"
        },
        {
          "field_a_term": "nuclear reaction rate ⟨σv⟩_{ij}(T)",
          "field_b_term": "temperature-dependent rate coefficient in the NRN ODE right-hand side",
          "note": "Most rates measured at LUNA (underground, low background) to 1–5% precision"
        },
        {
          "field_a_term": "baryon-to-photon ratio η",
          "field_b_term": "single free parameter determining the full solution trajectory",
          "note": "η~6.1×10⁻¹⁰ from CMB; BBN predictions at this η match all primordial abundances"
        },
        {
          "field_a_term": "neutron-to-proton ratio n/p at weak freeze-out (T~0.8 MeV)",
          "field_b_term": "initial condition of the ODE network set by weak interaction rates",
          "note": "n/p≈1/7 at freeze-out; determines He-4 fraction since almost all neutrons end up in He-4"
        }
      ],
      "references": [
        {
          "doi": "10.1146/annurev.aa.30.090192.001229",
          "note": "Walker et al. (1991) Primordial nucleosynthesis redux. Astrophys J 376:51"
        },
        {
          "doi": "10.1016/j.physrep.2015.12.005",
          "note": "Cyburt et al. (2016) Big Bang nucleosynthesis — 2015. Rev Mod Phys 88:015004"
        },
        {
          "doi": "10.1016/j.physletb.2007.12.028",
          "note": "Iocco et al. (2009) Primordial nucleosynthesis: from precision cosmology to fundamental physics. Phys Rept 472:1"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/astrophysics-physics/b-primordial-nucleosynthesis-reaction-networks.yaml"
    },
    {
      "id": "b-aerosol-nucleation-cloud-formation",
      "title": "Atmospheric aerosol particles act as cloud condensation nuclei (CCN) by reducing the Kelvin-barrier to droplet nucleation, quantified by classical nucleation theory: droplet formation requires supersaturation S > S_crit = exp(4σ*M_w / (ρ_w*R*T*r)) where the critical radius r_crit = 2σ/(ρ_w*R*T*ln(S)) determines which particles activate as cloud droplets",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Classical nucleation theory (CNT) describes how supersaturated water vapor activates aerosol particles into cloud droplets: a particle of radius r with water-soluble fraction acts as a CCN if ambient supersaturation S exceeds S_crit(r, composition) from Köhler theory, which balances the Kelvin curva",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Atmospheric scientists measure aerosol optical depth and CCN concentrations in field campaigns while physical chemists study nucleation in controlled laboratory settings; translating laboratory nucleation kinetics to global climate models requires bridging microphysical nucleation theory with mesoscale cloud parameterization schemes.",
      "translation_table": [
        {
          "field_a_term": "cloud condensation nucleus (CCN) activation (atmospheric science)",
          "field_b_term": "nucleation barrier crossing in classical nucleation theory (physics)",
          "note": "A particle 'activates' when S > S_crit; this is the classical nucleation free-energy barrier overcome by supersaturation"
        },
        {
          "field_a_term": "Köhler curve critical supersaturation S_crit (atmospheric science)",
          "field_b_term": "saddle point in Gibbs free energy surface of nucleating droplet (physics)",
          "note": "S_crit corresponds to the free-energy maximum; beyond r*, droplets grow spontaneously without further supersaturation"
        },
        {
          "field_a_term": "aerosol size distribution N(r) (atmospheric science)",
          "field_b_term": "distribution of nucleation rates J(r, S) over particle population (physics)",
          "note": "CCN concentration = ∫_{r > r_crit(S)} N(r)dr; determines cloud droplet number and radiative properties"
        },
        {
          "field_a_term": "indirect aerosol effect on cloud albedo (atmospheric science)",
          "field_b_term": "nucleation rate sensitivity to aerosol surface energy σ (physics)",
          "note": "More CCN → smaller droplets → higher cloud albedo (Twomey effect); quantified through d(ln N_d)/d(ln N_a)"
        }
      ],
      "references": [
        {
          "doi": "10.1175/1520-0477(1974)055<1100:TEAACC>2.0.CO;2",
          "note": "Twomey (1974) - pollution and planetary albedo: original indirect aerosol effect"
        },
        {
          "doi": "10.1029/1999GL900161",
          "note": "Abdul-Razzak & Ghan (2000) - parameterization of CCN activation for climate models"
        },
        {
          "doi": "10.5194/acp-7-1961-2007",
          "note": "Petters & Kreidenweis (2007) - single parameter representation of aerosol CCN activity"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/atmospheric-science-physics/b-aerosol-nucleation-cloud-formation.yaml"
    },
    {
      "id": "b-xenobiotic-metabolism-cyp450",
      "title": "Xenobiotic metabolism by cytochrome P450 enzymes follows Michaelis-Menten saturable kinetics v = V_max*[S]/(K_m + [S]) where each CYP isoform (CYP3A4, CYP2D6, CYP2C9) has distinct substrate specificity encoded in the active site topology, and competitive inhibition between co-administered drugs follows the Dixon equation for competitive inhibition, providing a biochemical kinetics framework for predicting drug-drug interactions",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "CYP450-mediated drug metabolism maps directly onto Michaelis-Menten enzyme kinetics: the metabolic rate v = V_max*[S]/(K_m + [S]) for each CYP isoform, with K_m reflecting drug-enzyme binding affinity and V_max = k_cat*[E_total] depending on enzyme expression level; competitive inhibition by a secon",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Pharmacologists study drug-drug interactions from clinical observations and population PK studies while biochemists characterize enzyme kinetics in purified systems; translating in vitro CYP kinetic data (K_m, K_i) to quantitative in vivo interaction predictions requires understanding the allometric scaling of hepatic blood flow and microsomal protein binding that bridges enzymology to clinical pharmacokinetics.",
      "translation_table": [
        {
          "field_a_term": "CYP450 isoform substrate metabolic rate (pharmacology)",
          "field_b_term": "Michaelis-Menten enzyme velocity v = Vmax[S]/(Km+[S]) (biochemistry)",
          "note": "Each CYP isoform has its own K_m and V_max for each substrate; measured by in vitro microsomal incubation"
        },
        {
          "field_a_term": "drug-drug interaction via CYP3A4 inhibition (pharmacology)",
          "field_b_term": "competitive enzyme inhibition altering apparent K_m (biochemistry)",
          "note": "Inhibitor raises apparent K_m by factor (1 + [I]/K_i) without changing V_max; increases AUC of victim drug"
        },
        {
          "field_a_term": "CYP2D6 poor metabolizer phenotype (pharmacology)",
          "field_b_term": "loss-of-function allele reducing k_cat and E_total (biochemistry)",
          "note": "Polymorphic CYP2D6 reduces V_max proportionally to functional allele copies; causes standard dosing toxicity"
        },
        {
          "field_a_term": "CYP3A4 induction by rifampicin (pharmacology)",
          "field_b_term": "transcriptional upregulation increasing [E_total] and V_max (biochemistry)",
          "note": "PXR nuclear receptor activation by rifampicin upregulates CYP3A4 expression, reducing drug AUC by >10-fold"
        }
      ],
      "references": [
        {
          "doi": "10.1124/dmd.108.020198",
          "note": "Guengerich (2008) - cytochrome P450 enzymes and drug metabolism"
        },
        {
          "doi": "10.1002/cpt.1",
          "note": "Rowland Yeo et al. (2013) - prediction of drug-drug interactions from in vitro CYP data"
        },
        {
          "doi": "10.1038/nrd1666",
          "note": "Rendic & Di Carlo (1997) - human cytochrome P450 enzymes: review of metabolizing activities"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biochemistry-pharmacology/b-xenobiotic-metabolism-cyp450.yaml"
    },
    {
      "id": "b-enzyme-allostery-monod-wyman-changeux-model",
      "title": "Enzyme allostery — the regulation of enzyme activity by molecules binding at sites remote from the active site — is formalized by the Monod-Wyman-Changeux (MWC) model from biophysics, which treats the enzyme as a two-state thermodynamic system whose T (tense/inactive) ↔ R (relaxed/active) equilibrium is shifted by ligand binding, explaining cooperative kinetics and sigmoidal dose-response curves.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The MWC model for an n-subunit enzyme with allosteric constant L = [T₀]/[R₀]: saturation function Y = α(1+α)^{n-1} + Lc·α(1+cα)^{n-1} / [(1+α)^n + L(1+cα)^n] where α = [A]/K_R (ligand/active-site affinity), c = K_R/K_T < 1 (allosteric ratio). Cooperativity coefficient h (Hill slope) = n·(1-f)/[f+(1-",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-dynamic-allostery-entropy-driven-conformational-selection"
      ],
      "communication_gap": "Biochemists measuring enzyme kinetics and biophysicists modeling protein conformational ensembles share the MWC framework but often present it at different mathematical levels; the modern \"statistical mechanical\" view of allostery (every protein is always in all conformations; ligands shift populations) is fully accepted in biophysics but is only slowly replacing \"induced fit\" language in biochemistry education; information-theoretic approaches to allosteric communication capacity are an emerging frontier.\n",
      "translation_table": [
        {
          "field_a_term": "allosteric transition T↔R (biochemistry)",
          "field_b_term": "two-state thermodynamic equilibrium with Boltzmann weights (biophysics)",
          "note": "The T/R ratio is exp(-ΔG/kT); effectors shift ΔG and thus the equilibrium"
        },
        {
          "field_a_term": "Hill coefficient n_H (biochemistry)",
          "field_b_term": "apparent cooperativity from multi-state Boltzmann system (biophysics)",
          "note": "n_H measures deviation from Michaelis-Menten; MWC explains it without invoking concerted binding"
        },
        {
          "field_a_term": "allosteric effector (biochemistry)",
          "field_b_term": "external field biasing a two-state partition function (biophysics)",
          "note": "Activator stabilizes R state; inhibitor stabilizes T state; thermodynamics predicts magnitude"
        },
        {
          "field_a_term": "sigmoidal dose-response curve (biochemistry)",
          "field_b_term": "logistic function from two-state Boltzmann partition (biophysics)",
          "note": "Hill equation is an approximation to MWC; exact sigmoidal shape follows from the full MWC partition function"
        }
      ],
      "references": [
        {
          "doi": "10.1016/S0022-2836(65)80285-6",
          "note": "Monod, Wyman & Changeux (1965) - on the nature of allosteric transitions (original MWC paper)"
        },
        {
          "doi": "10.1016/j.jmb.2012.08.021",
          "note": "Changeux & Edelstein (2005) - allosteric mechanisms of signal transduction (review)"
        },
        {
          "doi": "10.1126/science.1207598",
          "note": "Motlagh et al. (2014) - the ensemble nature of allostery (statistical mechanics view)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biochemistry-thermodynamics/b-enzyme-allostery-monod-wyman-changeux-model.yaml"
    },
    {
      "id": "b-fermentation-thermodynamic-equilibrium",
      "title": "Microbial fermentation pathway selection is governed by thermodynamic free energy minimisation: the Gibbs free energy change ΔG° of each metabolic reaction determines which pathways are feasible, and cells regulate NAD⁺/NADH ratios to maintain ΔG < 0 across the fermentation network even when ATP yield is suboptimal.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Fermentation is the anaerobic oxidation of organic compounds coupled to ATP synthesis without a terminal inorganic electron acceptor. The pathway a microbe takes (homolactic, ethanolic, butyric, etc.) depends on the thermodynamic feasibility of each reaction: ΔG = ΔG° + RT ln Q must be negative for ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-fermentation-nad-ratio-pathway-selection-thermodynamic"
      ],
      "communication_gap": "Biochemistry textbooks describe fermentation pathways in mechanistic detail (enzyme names, cofactors) but rarely perform thermodynamic feasibility analysis. Thermodynamicists studying biological energy conversion rarely engage with detailed metabolic network topology. Alberty's transformed Gibbs energy framework (2003 J. Phys. Chem. Ref. Data) is underused in metabolic engineering.\n",
      "translation_table": [
        {
          "field_a_term": "NAD⁺/NADH redox couple potential E'°(NAD⁺/NADH) = −0.32 V",
          "field_b_term": "Half-cell potential in an electrochemical cell",
          "note": "Fermentation reactions use NADH reoxidation as the electron sink; E sets the thermodynamic driving force"
        },
        {
          "field_a_term": "ΔG of glycolysis (−73 kJ/mol glucose under standard conditions)",
          "field_b_term": "Work available from fuel oxidation in a heat engine",
          "note": "Efficiency of ATP capture = ΔG_ATP / ΔG_glycolysis; fermentation captures ~30%, aerobiosis ~40%"
        },
        {
          "field_a_term": "Fermentation pathway branching point (pyruvate to lactate vs. ethanol)",
          "field_b_term": "Kinetic/thermodynamic competition between parallel reaction channels",
          "note": "The pathway with more negative ΔG dominates; enzyme expression adjusts flux to maintain ΔG < 0"
        },
        {
          "field_a_term": "Product inhibition (ethanol toxicity in yeast)",
          "field_b_term": "Le Chatelier's principle: product accumulation shifts equilibrium backward",
          "note": "Ethanol concentration drives ΔG toward zero; cells must expel product to maintain thermodynamic drive"
        }
      ],
      "references": [
        {
          "doi": "10.1021/jp020625u",
          "note": "Alberty (2003) – thermodynamics of biochemical reactions; transformed Gibbs energies at pH 7"
        },
        {
          "doi": "10.1128/MMBR.00008-10",
          "note": "Thauer et al. (2008) Microbiol. Mol. Biol. Rev. – methanogenic archaea thermodynamics; ΔG of fermentation steps"
        },
        {
          "doi": "10.1073/pnas.1112325108",
          "note": "Flamholz et al. (2012) PNAS – thermodynamic constraints on metabolic flux; eQuilibrator database"
        },
        {
          "doi": "10.1038/nchembio.971",
          "note": "Henry et al. (2007) Nature Chem. Biol. – thermodynamics-based metabolic flux analysis"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biochemistry-thermodynamics/b-fermentation-thermodynamic-equilibrium.yaml"
    },
    {
      "id": "b-hydrothermal-vent-prebiotic-networks",
      "title": "Hydrothermal vent geochemistry provides a natural autocatalytic reaction network with proton gradients, mineral catalysts, and thermodynamic disequilibria that can drive prebiotic chemical evolution — making alkaline vent systems the most plausible abiogenesis laboratory and connecting deep-sea geochemistry to origin of life chemistry.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Alkaline hydrothermal vents (Lost City type) produce fluids rich in H2 and CH4 at pH 9-11, in contact with CO2-rich ocean water at pH ~8 — maintaining a proton gradient of ~3 pH units across thin Fe-Ni-S mineral membranes. Lane and Martin (2010) argued this gradient is homologous to the proton-motiv",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "",
      "translation_table": [
        {
          "field_a_term": "proton gradient across vent mineral membrane",
          "field_b_term": "primordial proton-motive force (proto-ATP synthase driver)",
          "note": "The ~180 mV gradient across Fe-Ni-S membranes is comparable to the 150-200 mV used by modern cells"
        },
        {
          "field_a_term": "autocatalytic reaction set",
          "field_b_term": "self-sustaining metabolic network (Kauffman autocatalytic set)",
          "note": "A set of reactions where every member is catalysed by some product of the set"
        },
        {
          "field_a_term": "Fe-Ni-S mineral catalyst",
          "field_b_term": "proto-ferredoxin (iron-sulfur clusters in modern enzymes)",
          "note": "Evolutionary relics of the vent minerals that catalysed the first metabolic reactions"
        },
        {
          "field_a_term": "geochemical free energy (H2/CO2 disequilibrium)",
          "field_b_term": "metabolic free energy driving biosynthesis",
          "note": "The thermodynamic driver for both prebiotic and biotic carbon fixation"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nrmicro2534",
          "note": "Lane & Martin (2010) — the energetics of genome complexity; alkaline vents and the origin of life"
        },
        {
          "doi": "10.1073/pnas.0307490100",
          "note": "Russell & Martin (2004) — the rocky roots of the acetyl-CoA pathway"
        },
        {
          "doi": "10.1126/science.1201966",
          "note": "Kelley et al. (2001) — an off-axis hydrothermal vent field near the Mid-Atlantic Ridge"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-astrobiology/b-hydrothermal-vent-prebiotic-networks.yaml"
    },
    {
      "id": "b-lichen-astrobiology",
      "title": "Synthetic lichen-like microbial consortia engineered for biofabrication on Earth are functional analogs of the self-sustaining biosystems required for off-world resource utilisation.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Lichen — obligate mutualistic consortia of photosynthetic partners (algae or cyanobacteria) and heterotrophic fungi — are among Earth's most extreme-environment colonisers because the consortium achieves metabolic self-sufficiency: primary production feeds heterotrophic processing, which in turn cyc",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-lichen-consortium-metabolic-coupling"
      ],
      "communication_gap": "Astrobiology / space engineering literature focuses on abiotic chemistry, radiation tolerance, and mission engineering rather than synthetic-biology circuit design. Synthetic biologists rarely model closed-loop ecosystem stability; they optimise individual strains or two-strain interactions in controlled lab media, not multi-partner systems on regolith analogs. Ecology journals do not reach either community's readership.\n",
      "translation_table": [
        {
          "field_a_term": "lichen thallus (ecology)",
          "field_b_term": "biofabrication scaffold (materials science)",
          "note": "The structural unit produced by the consortium"
        },
        {
          "field_a_term": "photobiont (ecology)",
          "field_b_term": "autotrophic carbon/energy fixer (synthetic biology)",
          "note": "The partner that captures energy from the environment"
        },
        {
          "field_a_term": "mycobiont (ecology)",
          "field_b_term": "heterotrophic material processor (synthetic biology)",
          "note": "The partner that builds structures and recycles nutrients"
        },
        {
          "field_a_term": "regolith (astrobiology)",
          "field_b_term": "granular mineral feedstock (materials science)",
          "note": "Unprocessed solid substrate available in space environments"
        },
        {
          "field_a_term": "ISRU (astrobiology)",
          "field_b_term": "closed-loop bioproduction (synthetic biology)",
          "note": "Umbrella terms for the same engineering goal approached from different angles"
        }
      ],
      "references": [
        {
          "arxiv": "2406.02522",
          "note": "Harvested paper seeding u-synthetic-lichen-biofabrication"
        },
        {
          "doi": "10.1016/j.tibtech.2020.05.007",
          "note": "Mhatre et al. 2020 — synthetic consortia for biomanufacturing"
        },
        {
          "doi": "10.1089/ast.2017.1739",
          "note": "Verseux et al. 2018 — biologically-based life support for Mars"
        }
      ],
      "last_reviewed": "2026-05-04",
      "file": "cross-domain/biology-astrobiology/b-lichen-astrobiology.yaml"
    },
    {
      "id": "b-antibiotic-mechanisms-resistance",
      "title": "Antibiotic mechanisms and resistance bridge biology and chemistry: four mechanistic target classes (cell wall, ribosome, DNA replication, membrane), matched by four resistance mechanisms (enzymatic inactivation, efflux, target modification, bypass), drive the ESKAPE pathogen crisis killing 1.27M/year with 10M projected by 2050.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Antibiotics target essential bacterial biochemical processes: (1) Cell wall synthesis: ╬▓-lactams (penicillin, cephalosporins, carbapenems) inhibit penicillin-binding proteins (PBPs) ΓÇö transpeptidases that crosslink peptidoglycan strands. Mechanism: ╬▓-lactam acylates the PBP active site Ser ΓåÆ i",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-phage-therapy-combination-delays-resistance-evolution-eskape"
      ],
      "communication_gap": "Microbiologists who study resistance mechanisms in clinical settings rarely engage with the organic chemistry of antibiotic mechanisms or the chemical biology of resistance enzyme active sites. Medicinal chemists developing new ╬▓-lactam inhibitors rarely engage with the clinical epidemiology of resistance spread (plasmid conjugation, clonal expansion, hospital infection control). The WHO policy community that sets global AMR targets rarely integrates the chemistry of mechanism-based inactivation.\n",
      "translation_table": [
        {
          "field_a_term": "╬▓-lactam ring (4-membered cyclic amide)",
          "field_b_term": "electrophilic acylating agent targeting PBP active-site serine",
          "note": "ring strain makes ╬▓-lactam highly reactive; ╬▓-lactamase hydrolysis opens ring ΓåÆ inactivation"
        },
        {
          "field_a_term": "╬▓-lactamase (hydrolytic enzyme)",
          "field_b_term": "serine or metallo-hydrolase that opens the ╬▓-lactam ring",
          "note": ">400 unique ╬▓-lactamase families; class B (metallo) require Zn┬▓Γü║ and resist all ╬▓-lactam inhibitors"
        },
        {
          "field_a_term": "AcrAB-TolC efflux pump",
          "field_b_term": "tripartite RND transporter spanning both membranes of Gram-negative cell envelope",
          "note": "proton motive force-driven; exports structurally diverse substrates; overexpression ΓåÆ multidrug resistance"
        },
        {
          "field_a_term": "PBP2a (MRSA resistance protein)",
          "field_b_term": "low-affinity transpeptidase with penicillin-binding site occluded by allosteric domain",
          "note": "mecA gene acquired horizontally from S. sciuri; structural change reduces ╬▓-lactam acylation rate >1000-fold"
        },
        {
          "field_a_term": "fluoroquinolone (ciprofloxacin)",
          "field_b_term": "Mg┬▓Γü║-chelating topoisomerase poison; ternary complex drug-DNA-enzyme ΓåÆ strand break",
          "note": "resistance via GyrA mutations (Ser83Leu) that reduce drug binding; chromosomal not plasmid-mediated initially"
        }
      ],
      "references": [
        {
          "note": "Walsh (2003) Antibiotics ΓÇö Actions, Origins, Resistance; ASM Press"
        },
        {
          "doi": "10.1016/S0140-6736(21)02724-0",
          "note": "Murray et al. (2022) Global burden of bacterial antimicrobial resistance in 2019 ΓÇö a systematic analysis; Lancet 399:629"
        },
        {
          "doi": "10.1016/S1473-3099(13)70318-9",
          "note": "Laxminarayan et al. (2013) Antibiotic resistance ΓÇö the need for global solutions; Lancet Infect Dis 13:1057"
        },
        {
          "doi": "10.1038/nrd3975",
          "note": "Lewis (2013) Platforms for antibiotic discovery; Nat Rev Drug Discov 12:371"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-chemistry/b-antibiotic-mechanisms-resistance.yaml"
    },
    {
      "id": "b-autophagy-cellular-recycling",
      "title": "Autophagy couples cell biology and chemistry: a double-membrane vesicle (autophagosome) delivers cytoplasmic cargo to the lysosome for enzymatic degradation and molecular recycling — a biological waste management and nutrient recovery system with precise chemical machinery.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Autophagy (Ohsumi, Nobel Prize 2016) is the cell's primary bulk degradation pathway. mTOR complex 1 (mTORC1) phosphorylates and inhibits ULK1; nutrient deprivation releases this inhibition → ULK1 activates the PI3K complex (VPS34/ Beclin-1) → PI3P nucleates the phagophore membrane. ATG proteins (ATG",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-pink1-parkin-mitophagy-parkinsons-therapeutic-target"
      ],
      "communication_gap": "Cell biologists who study autophagy by fluorescence microscopy (LC3 puncta, p62 clearance) and geneticists who map ATG gene function rarely engage with the lipid chemistry of membrane formation or the enzymology of lysosomal degradation. The chemical reconstitution and quantitative biochemistry are published in chemistry and biophysics journals (JACS, eLife biophysics) rarely read by cell biologists.\n",
      "translation_table": [
        {
          "field_a_term": "mTOR inhibition (cellular starvation signal)",
          "field_b_term": "upstream kinase-regulated initiation of membrane remodeling cascade",
          "note": "connects nutrient sensing to lipid chemistry of autophagosome formation"
        },
        {
          "field_a_term": "LC3-II (autophagosome marker)",
          "field_b_term": "phosphatidylethanolamine-conjugated ubiquitin-like protein",
          "note": "the lipidation reaction (ATG7/ATG3 E1/E2 cascade) is a chemical conjugation"
        },
        {
          "field_a_term": "p62/SQSTM1 cargo receptor",
          "field_b_term": "polyubiquitin-binding adaptor with LC3-interacting region (LIR)",
          "note": "molecular bridge between ubiquitin code and autophagy machinery"
        },
        {
          "field_a_term": "PINK1-Parkin mitophagy",
          "field_b_term": "phosphorylated ubiquitin (pSer65) as a chemical damage signal",
          "note": "kinase-generated phosphoubiquitin is the chemical tag triggering selective mitophagy"
        },
        {
          "field_a_term": "lysosomal degradation",
          "field_b_term": "acid hydrolase chemistry (cathepsins B/D/L, lipases, glycosidases)",
          "note": "biochemical breakdown returning monomers to biosynthetic pools"
        }
      ],
      "references": [
        {
          "note": "Ohsumi (2016) Autophagy — an intracellular recycling system; Nobel Lecture"
        },
        {
          "doi": "10.1016/j.cell.2011.10.026",
          "note": "Mizushima et al. (2011) The role of Atg proteins in autophagosome formation; Cell 147:728"
        },
        {
          "doi": "10.1038/nrmicro2822",
          "note": "Youle & Narendra (2011) Mechanisms of mitophagy; Nat Rev Mol Cell Biol 12:9"
        },
        {
          "doi": "10.1016/j.cell.2007.12.018",
          "note": "Levine & Kroemer (2008) Autophagy in the pathogenesis of disease; Cell 132:27"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-chemistry/b-autophagy-cellular-recycling.yaml"
    },
    {
      "id": "b-circadian-clock-molecular-oscillator",
      "title": "The ~24-hour circadian clock in eukaryotes is a biochemical limit-cycle oscillator: the PER/CRY/CLOCK/BMAL1 transcription-translation feedback loop generates self-sustained oscillations described by Goodwin-type nonlinear ODEs, and the clock's period, amplitude, and entrainability are predicted by the Hopf bifurcation structure of the oscillator.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The core circadian oscillator is a negative feedback loop: CLOCK:BMAL1 activates Per and Cry transcription; PER:CRY proteins accumulate, enter the nucleus, and repress CLOCK:BMAL1. This is a delayed negative feedback loop producing limit-cycle oscillations (Goodwin 1965). Goldbeter (1995) showed tha",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-circadian-hopf-bifurcation-period-mutation-prediction"
      ],
      "communication_gap": "Chronobiologists study PER/CRY kinetics experimentally (genetic screens, luciferase reporters) while nonlinear dynamicists study limit-cycle theory abstractly. Goldbeter's 1995 PNAS paper and Leloup & Goldbeter 2003 established the bridge, but most molecular chronobiology papers still describe the clock in biochemical terms without referencing the Hopf bifurcation structure or limit-cycle theory explicitly.\n",
      "translation_table": [
        {
          "field_a_term": "PER/CRY nuclear repression of CLOCK:BMAL1",
          "field_b_term": "Negative feedback in a nonlinear ODE oscillator (Goodwin model)",
          "note": "Delayed negative feedback with cooperative Hill-function repression (n > 8) = Hopf bifurcation"
        },
        {
          "field_a_term": "Circadian period (~24h)",
          "field_b_term": "Period of the limit cycle at the Hopf bifurcation",
          "note": "Period is set by the delay in the feedback loop (mRNA half-life, nuclear transport time)"
        },
        {
          "field_a_term": "Temperature compensation of period",
          "field_b_term": "Robustness of limit-cycle period to parameter variation",
          "note": "Biological circuits tune multiple rate constants so period is insensitive to temperature Q₁₀"
        },
        {
          "field_a_term": "Photic entrainment by light pulses (phase response curve)",
          "field_b_term": "External forcing of a limit-cycle oscillator (Arnold tongues)",
          "note": "Phase response curve = Jacobian of the oscillator limit cycle under impulsive perturbation"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.92.21.9383",
          "note": "Goldbeter (1995) PNAS – a model for circadian oscillations in Drosophila per protein; Goodwin-type oscillator"
        },
        {
          "doi": "10.1073/pnas.0306901100",
          "note": "Leloup & Goldbeter (2003) PNAS – toward a detailed computational model for the mammalian circadian clock"
        },
        {
          "doi": "10.1038/35036009",
          "note": "Dunlap (1999) Cell – molecular bases for circadian clocks; feedback loop review"
        },
        {
          "doi": "10.1126/science.1089924",
          "note": "Schibler & Sassone-Corsi (2002) Cell – a web of circadian pacemakers; systems perspective"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-chemistry/b-circadian-clock-molecular-oscillator.yaml"
    },
    {
      "id": "b-enzyme-allostery-conformational",
      "title": "Allosteric enzyme regulation follows the Monod-Wyman-Changeux (MWC) model — cooperative T↔R conformational equilibrium governed by the Hill equation — a mathematical framework identical to cooperative binding in hemoglobin, ion channel gating, and gene expression switch behaviour.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "ALLOSTERY DEFINITION: A ligand binding at one site changes activity at a distant active site via conformational change. Cannot be explained by direct steric blockade.\nMWC MODEL (Monod-Wyman-Changeux 1965): Oligomeric enzyme exists in two conformational states: - T (tense/inactive): low affinity, low",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-allostery-entropic-communication-pathway"
      ],
      "communication_gap": "Biochemists learn the MWC model and Hill equation as empirical tools without recognising the Ising model connection. Statistical physicists working on two-state systems rarely engage with the allosteric enzyme literature. The bridge is noted by Changeux & Edelstein (2005) in Science but remains underexploited for drug design.\n",
      "translation_table": [
        {
          "field_a_term": "allosteric constant L = T/R equilibrium (biochemistry)",
          "field_b_term": "Boltzmann factor for two-state system (statistical mechanics)",
          "note": "L = exp(ΔG_TR / kT) — the MWC model is literally a statistical mechanical two-state partition function"
        },
        {
          "field_a_term": "Hill coefficient n (biochemistry)",
          "field_b_term": "cooperativity / sharpness of switch",
          "note": "n can be derived from the MWC model as n_eff = n·L/(1+L) — depends on allosteric constant"
        },
        {
          "field_a_term": "R state (relaxed, active) / T state (tense, inactive)",
          "field_b_term": "open / closed channel states (biophysics)",
          "note": "MWC applies directly to ion channels with voltage as the allosteric effector"
        },
        {
          "field_a_term": "conformational change upon ligand binding",
          "field_b_term": "induced fit vs. conformational selection (structural biology)",
          "note": "MWC = conformational selection (pre-existing equilibrium); KNF = induced fit (sequential)"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0022-2836(65)90184-3",
          "note": "Monod et al. (1965) J Mol Biol 12:88 — original MWC model"
        },
        {
          "note": "Hill (1910) J Physiol 40:iv — Hill equation for cooperative binding"
        },
        {
          "doi": "10.1021/bi00829a011",
          "note": "Koshland et al. (1966) Biochemistry 5:365 — KNF sequential model"
        },
        {
          "doi": "10.1002/prot.20330",
          "note": "Gunasekaran et al. (2004) Proteins 57:433 — allostery without conformational change"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-chemistry/b-enzyme-allostery-conformational.yaml"
    },
    {
      "id": "b-glycobiology-cell-recognition",
      "title": "Glycobiology and Cell Recognition — the glycocalyx sugar code, ABO blood groups, selectin-mediated leukocyte rolling, and sialic acid as influenza species barrier",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Glycans (complex oligosaccharide chains) coat every eukaryotic cell surface, forming the glycocalyx — a dense, highly information-rich extracellular layer. The sugar code: the information density of oligosaccharides vastly exceeds that of amino acid or nucleotide sequences. A tetrasaccharide has 35,",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Glycobiology has historically been technically challenging — glycans cannot be directly genetically encoded and require specialised mass spectrometry and NMR for structural characterisation. As a result, glycans were underrepresented in the proteomics revolution and the field developed somewhat in isolation. The Human Glycome Project and advances in glycan synthesis (Wong, Seeberger) are now enabling systematic glycan arrays and functional screens, beginning to integrate glycobiology with genomics and cell biology communities.\n",
      "translation_table": [
        {
          "field_a_term": "N-linked glycan (Asn-GlcNAc₂Man₉ core)",
          "field_b_term": "co-translational protein folding quality control tag",
          "note": "Calnexin binds monoglucosylated glycoproteins; UGGT re-glucosylates misfolded proteins to retain them in ER"
        },
        {
          "field_a_term": "ABO glycosyltransferases (GTA/GTB)",
          "field_b_term": "blood group antigens governing transfusion and transplant compatibility",
          "note": "O allele has 4-nt deletion creating frameshift → no functional enzyme → H antigen unmodified"
        },
        {
          "field_a_term": "selectin–sialyl Lewis^x interaction",
          "field_b_term": "leukocyte rolling on inflamed endothelium (first step of extravasation)",
          "note": "Force-dependent bond: selectin bonds are catch bonds — affinity increases with tensile force, enabling rolling not arrest"
        },
        {
          "field_a_term": "sialic acid linkage (alpha-2,3 vs alpha-2,6)",
          "field_b_term": "influenza host tropism and pandemic potential barrier",
          "note": "A single HA residue change (226 Q→L in H3) switches alpha-2,3 to alpha-2,6 preference and enables human transmission"
        },
        {
          "field_a_term": "polysialic acid (PSA) on NCAM",
          "field_b_term": "anti-adhesion modifier reducing synaptic plasticity during development",
          "note": "PSA-NCAM expression is inversely correlated with synaptic stabilisation; PSA removal triggers synapse formation"
        },
        {
          "field_a_term": "glycocalyx thickness (50–500 nm)",
          "field_b_term": "mechanical exclusion layer controlling ligand access to cell surface receptors",
          "note": "Glycocalyx compression is a force that cells must overcome for receptor engagement; relevant in cancer cell invasion"
        }
      ],
      "references": [
        {
          "doi": "10.1093/glycob/3.2.97",
          "note": "Varki (1993) Glycobiology 3:97 — biological roles of oligosaccharides"
        },
        {
          "doi": "10.1021/cr9004749",
          "note": "Dwek (1996) Chem Rev 96:683 — glycobiology: toward understanding the function of sugars"
        },
        {
          "doi": "10.1146/annurev-cellbio-100109-104034",
          "note": "McEver & Zhu (2010) Annu Rev Cell Dev Biol 26:363 — selectin catch bonds"
        },
        {
          "doi": "10.1146/annurev.biochem.69.1.531",
          "note": "Skehel & Wiley (2000) Annu Rev Biochem 69:531 — influenza hemagglutinin and receptor binding"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-chemistry/b-glycobiology-cell-recognition.yaml"
    },
    {
      "id": "b-lipid-bilayer-membrane-thermodynamics",
      "title": "Lipid bilayer phase transitions from gel to fluid follow Landau free energy theory F = a(T-T_m)phi^2 + b*phi^4, with the transition temperature T_m tunable by lipid composition and cholesterol; membrane permeability and compressibility diverge near T_m in precise analogy to critical phenomena, connecting thermodynamic phase transition physics to membrane biophysics and the Meyer-Overton anesthetic mechanism.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Lipid bilayers undergo gel (Lbeta) to liquid-crystalline (Lalpha) phase transitions at melting temperatures T_m (typically 20-45C for physiological lipids). Below T_m: ordered gel phase with all-trans acyl chains (low lateral diffusion D ~ 10^-10 cm^2/s). Above T_m: disordered liquid crystalline pha",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-anesthesia-consciousness-thalamic-disruption"
      ],
      "communication_gap": "Physical chemistry of membranes developed largely in biophysics departments while cell biology treated membranes primarily as protein scaffolds. The critical phenomena analogy was made explicit by Mouritsen's Pink model in the 1980s but has not penetrated cell biology or pharmacology textbooks. Anesthesiology journals rarely cite Landau free energy or phase transition theory.\n",
      "translation_table": [
        {
          "field_a_term": "gel phase (ordered acyl chains, low diffusivity)",
          "field_b_term": "ordered/crystalline phase in condensed matter",
          "note": "all-trans acyl chains analogous to crystalline lattice ordering"
        },
        {
          "field_a_term": "liquid crystalline phase (disordered, high lateral diffusion)",
          "field_b_term": "liquid/disordered phase",
          "note": "gauche conformers create conformational disorder analogous to thermal disorder in liquids"
        },
        {
          "field_a_term": "melting temperature T_m",
          "field_b_term": "critical temperature T_c (or T_m in solid-liquid transition)",
          "note": "T_m increases with chain length (C14:0 < C16:0 < C18:0) and degree of saturation"
        },
        {
          "field_a_term": "membrane isothermal area compressibility modulus K_A",
          "field_b_term": "isothermal compressibility diverging near phase transition",
          "note": "both diverge near T_c; fluctuations couple to permeability and cellular signaling"
        },
        {
          "field_a_term": "lipid rafts (liquid ordered Lo phase from cholesterol + sphingomyelin)",
          "field_b_term": "phase-separated microdomains in a binary mixture",
          "note": "Lo phase has intermediate properties between gel and liquid disordered phases"
        }
      ],
      "references": [
        {
          "doi": "10.1017/S0033583500000949",
          "note": "Chapman (1975) — Phase transitions and fluidity characteristics of lipids; Q Rev Biophys 8:185"
        },
        {
          "doi": "10.1016/S0006-3495(84)84040-9",
          "note": "Mouritsen & Bloom (1984) — Mattress model of lipid-protein interactions; Biophys J 46:141"
        },
        {
          "doi": "10.1146/annurev.biophys.33.110502.133731",
          "note": "Simons & Vaz (2004) — Model systems, lipid rafts and cell membranes; Annu Rev Biophys 33:269"
        },
        {
          "note": "Overton (1901) — Studien uber die Narkose; Gustav Fischer Verlag"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-chemistry/b-lipid-bilayer-membrane-thermodynamics.yaml"
    },
    {
      "id": "b-lipid-bilayer-membrane-transport",
      "title": "Saffman-Delbrück hydrodynamics and the fluid mosaic model unify soft-matter physics with biological membrane chemistry — lipid raft phase separation and ion transport are the same physics operating at the nanoscale",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The plasma membrane is a 2D fluid: the Singer-Nicolson fluid mosaic model (1972) treats membrane proteins as diffusing in a viscous 2D lipid bilayer. The Saffman-Delbrück (1975) formula D ≈ kT/(4πηh) predicts lateral diffusion coefficients measured by FRAP, linking physical chemistry (viscosity η, m",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-lipid-raft-phase-separation-receptor-clustering"
      ],
      "communication_gap": "Soft-matter physicists and physical chemists study membrane biophysics in journals like Biophysical Journal and Langmuir; cell biologists study membrane biology in Cell, Nature Cell Biology, and the Journal of Cell Biology. The mathematical frameworks (Cahn-Hilliard, Saffman-Delbrück) are standard in biophysics but absent from most cell biology curricula. Lipid raft debates persist partly because biologists and physical chemists have different standards of evidence for \"phase separation.\"\n",
      "translation_table": [
        {
          "field_a_term": "2D binary fluid phase separation (soft-matter physics)",
          "field_b_term": "lipid raft formation (cell biology)",
          "note": "Cholesterol-sphingolipid L_o phase in a 2D phospholipid L_d background is a biological spinodal/nucleation"
        },
        {
          "field_a_term": "Saffman-Delbrück diffusion (physics)",
          "field_b_term": "lateral protein mobility (cell biology, FRAP)",
          "note": "The hydrodynamic formula predicts membrane protein diffusion measured by fluorescence recovery"
        },
        {
          "field_a_term": "Nernst-Planck electrodiffusion (electrochemistry)",
          "field_b_term": "Goldman-Hodgkin-Katz ion current (neuroscience/cell biology)",
          "note": "GHK equation is the steady-state solution of electrodiffusion for a membrane with multiple permeant ions"
        },
        {
          "field_a_term": "Gibbs free energy of electrochemical gradient",
          "field_b_term": "proton motive force / ATP synthesis (biochemistry)",
          "note": "ΔG = RT·ln[C_out/C_in] + zFΔV is the same thermodynamic expression in both contexts"
        },
        {
          "field_a_term": "bilayer bending modulus κ (soft matter)",
          "field_b_term": "membrane tension and endocytosis threshold (cell biology)",
          "note": "Helfrich elasticity determines when cells can deform membranes for vesicle budding"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.175.4023.720",
          "note": "Singer & Nicolson (1972). The fluid mosaic model of the structure of cell membranes. Science 175:720."
        },
        {
          "doi": "10.1073/pnas.72.8.3111",
          "note": "Saffman & Delbrück (1975). Brownian motion in biological membranes. PNAS 72:3111."
        },
        {
          "note": "Goldman (1943). Potential, impedance, and rectification in membranes. J Gen Physiol 27:37."
        },
        {
          "doi": "10.1126/science.1174621",
          "note": "Lingwood & Simons (2010). Lipid rafts as a membrane-organizing principle. Science 327:46."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-chemistry/b-lipid-bilayer-membrane-transport.yaml"
    },
    {
      "id": "b-protein-folding-energy-landscape",
      "title": "Protein folding is explained by the funnel-shaped energy landscape theory: the native state is a deep, narrow free energy minimum, folding follows a downhill path through G(Q) parameterized by fraction of native contacts Q, and AlphaFold2 implicitly learns this landscape via evolutionary covariance contact predictions with near-experimental accuracy.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Levinthal's paradox (1969): a 100-amino-acid protein has ~3^100 ≈ 10^48 conformations; even sampling at 10^13/s would take 10^27 years — far longer than the age of the universe. Yet proteins fold reproducibly in milliseconds to microseconds. Resolution: the energy landscape is funneled (Wolynes, Onu",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-protein-folding-funnel-alphafold2-contact-prediction-mechanism"
      ],
      "communication_gap": "The energy landscape framework was developed by theoretical biophysicists (Wolynes, Onuchic, Bryngelson, Dill) using statistical mechanics language not standard in biochemistry. Experimental protein chemists focused on Φ-value analysis and kinetics without the landscape framing. AlphaFold2 emerged from machine learning, disconnected from the biophysics theory. The theoretical prediction that AlphaFold2 implicitly computes the energy landscape minimum is not in the AlphaFold2 paper — it is a post-hoc interpretation that bridges the computational and theoretical communities.\n",
      "translation_table": [
        {
          "field_a_term": "protein folding funnel",
          "field_b_term": "funneled free energy landscape in configuration space G(Q)",
          "note": "Native state = deep free energy minimum; denatured = broad entropic basin"
        },
        {
          "field_a_term": "fraction of native contacts Q",
          "field_b_term": "order parameter for folding phase transition",
          "note": "Q ∈ [0,1]; two-state folding = first-order-like transition at Q*"
        },
        {
          "field_a_term": "folding rate k_f = k₀ exp(−ΔG‡/RT)",
          "field_b_term": "Arrhenius/Kramers rate for barrier crossing on free energy surface",
          "note": "k₀ ~ 10^7 s⁻¹ for diffusion-limited contact formation"
        },
        {
          "field_a_term": "Φ-value analysis (mutant effect on k_f and K_eq)",
          "field_b_term": "probe of transition state structure — position of TS on folding funnel",
          "note": "Φ ≈ 1 → residue natively structured in TS; Φ ≈ 0 → disordered"
        },
        {
          "field_a_term": "stability gap (E_native − E_glass > δ)",
          "field_b_term": "condition for directed folding vs. glass transition (Bryngelson-Wolynes)",
          "note": "Evolution has optimized sequences to satisfy the stability gap"
        },
        {
          "field_a_term": "AlphaFold2 evoformer attention",
          "field_b_term": "learned approximation to minimum of G(Q) from evolutionary data",
          "note": "Contact predictions from MSA covariation are a compressed representation of the landscape"
        }
      ],
      "references": [
        {
          "note": "Levinthal (1969) J Chim Phys 65:44 — Levinthal's paradox statement"
        },
        {
          "doi": "10.1073/pnas.84.21.7524",
          "note": "Bryngelson & Wolynes (1987) PNAS 84:7524 — statistical mechanics of protein folding and spin glasses"
        },
        {
          "doi": "10.1038/nsb0497-10",
          "note": "Dill & Chan (1997) Nat Struct Biol 4:10 — from Levinthal to pathways to funnels"
        },
        {
          "doi": "10.1038/s41586-021-03819-2",
          "note": "Jumper et al. (2021) Nature 596:583 — AlphaFold2 protein structure prediction"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-chemistry/b-protein-folding-energy-landscape.yaml"
    },
    {
      "id": "b-rna-folding-partition-function",
      "title": "RNA secondary structure prediction is a statistical-mechanics partition function problem: the ensemble of all possible base-pair configurations is weighted by Boltzmann factors exp(−ΔG°/RT), and the minimum free-energy structure, base- pair probabilities, and thermodynamic accessibility are all computed from the McCaskill partition function using dynamic programming.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "An RNA molecule of length N can adopt exponentially many secondary structures (base-pair pairings without pseudoknots). McCaskill (1990) showed that the partition function Z = Σ_s exp(−ΔG°(s)/RT), summing over all structures s with their free energy, can be computed in O(N³) time using dynamic progr",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-rna-boltzmann-ensemble-functional-structure-selection"
      ],
      "communication_gap": "RNA biologists routinely use RNAfold/mfold predictions without awareness of the statistical mechanics framework underlying the partition function. Statistical physicists studying disordered systems (spin glasses) have not extensively analysed the RNA folding problem despite formal similarities to the random energy model.\n",
      "translation_table": [
        {
          "field_a_term": "RNA partition function Z = Σ_s exp(−ΔG°(s)/RT)",
          "field_b_term": "Statistical mechanics partition function Z = Tr[exp(−βH)]",
          "note": "RNA structures = microstates; ΔG°(s) = Hamiltonian; T = thermodynamic temperature"
        },
        {
          "field_a_term": "Base-pair probability matrix P_ij",
          "field_b_term": "Thermal average of spin-spin correlation ⟨s_ij⟩",
          "note": "P_ij = probability base i pairs with base j in the Boltzmann ensemble"
        },
        {
          "field_a_term": "Minimum free energy (MFE) structure",
          "field_b_term": "Ground state configuration (lowest-energy microstate)",
          "note": "MFE = ground state; only dominant when energy gap to next structure is large (kT)"
        },
        {
          "field_a_term": "Dynamic programming recursion (McCaskill O(N³))",
          "field_b_term": "Transfer matrix method for 1D spin chains",
          "note": "Both exploit the non-crossing (planar) constraint to decompose the problem into independent subproblems"
        }
      ],
      "references": [
        {
          "doi": "10.1021/bi00413a004",
          "note": "McCaskill (1990) Biopolymers – partition function algorithm for RNA secondary structure; O(N³) DP"
        },
        {
          "doi": "10.1093/nar/19.22.6329",
          "note": "Zuker (1989) Science – mfold algorithm for minimum free energy RNA secondary structure"
        },
        {
          "doi": "10.1093/nar/gkh930",
          "note": "Lorenz et al. (2011) – ViennaRNA Package 2.0 including partition function and base-pair probabilities"
        },
        {
          "doi": "10.1016/j.jmb.2006.01.048",
          "note": "Tinoco & Bustamante – how RNA folds; thermodynamic vs. kinetic control of structure"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-chemistry/b-rna-folding-partition-function.yaml"
    },
    {
      "id": "b-rna-world-origin-of-life",
      "title": "The RNA world hypothesis bridges molecular biology and prebiotic chemistry: RNA molecules can both store genetic information and catalyze chemical reactions (ribozymes), suggesting that RNA preceded both DNA and proteins as the primordial self-replicating molecule at the origin of life.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The RNA world hypothesis (Gilbert 1986) proposes a primordial epoch when RNA served both as genetic material (information storage, like DNA) and as catalytic molecules (ribozymes, like proteins). The discovery that the ribosome's peptidyl transferase center — the active site catalyzing peptide bond ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-rna-world-ribozyme-first-protein-emergence"
      ],
      "communication_gap": "The RNA world hypothesis crosses the disciplinary boundary between molecular biology (studying extant life) and prebiotic chemistry (studying conditions 4 billion years ago). Biologists study ribozymes in the context of modern gene regulation (CRISPR RNAs, snRNPs); chemists study nucleotide synthesis and polymerization kinetics. These communities publish in different journals (Nature Chemical Biology vs. Journal of the American Chemical Society), attend different conferences, and rarely co-author. The prebiotic synthesis challenge — getting from simple molecules to activated nucleotides — was considered a chemistry problem; the replication challenge was considered a biology problem; the integration requires both simultaneously.\n",
      "translation_table": [
        {
          "field_a_term": "ribozyme (RNA enzyme, e.g., RNase P, Group I introns, ribosome)",
          "field_b_term": "protein enzyme (genetically encoded catalyst in modern biology)",
          "note": "ribozymes prove RNA can catalyze complex reactions — RNA preceded protein catalysis evolutionarily"
        },
        {
          "field_a_term": "RNA template (genetic information storage, single-stranded)",
          "field_b_term": "DNA double helix (modern genetic storage, chemically more stable)",
          "note": "DNA is biochemically modified RNA — 2'-OH removed (deoxyribose) + uracil → thymine (5-methyl uracil)"
        },
        {
          "field_a_term": "activated nucleotide (imidazolide-NMP, prebiotic polymerization substrate)",
          "field_b_term": "NTP (ATP, GTP — substrates for modern RNA polymerases)",
          "note": "same fundamental chemistry; modern enzymes evolved to handle NTPs more efficiently"
        },
        {
          "field_a_term": "non-enzymatic template copying (prebiotic RNA replication)",
          "field_b_term": "PCR amplification / enzymatic replication (modern biotechnology)",
          "note": "both achieve sequence-faithful information copying; enzyme-free version is ~1000× slower"
        },
        {
          "field_a_term": "autocatalytic RNA network (Lincoln-Joyce ribozyme pair)",
          "field_b_term": "metabolic network (enzyme-catalyzed biochemical cycles in modern cells)",
          "note": "both are self-sustaining catalytic networks; RNA network is evolutionary ancestor"
        }
      ],
      "references": [
        {
          "doi": "10.1038/319618a0",
          "note": "Gilbert (1986) The RNA world; Nature 319:618"
        },
        {
          "note": "Yonath (2009) Nobel Lecture — Ribosomes: from sloppy chemistry to exquisite molecular machines; Nobel Foundation"
        },
        {
          "doi": "10.1038/35053176",
          "note": "Szostak et al. (2001) Synthesizing life; Nature 409:387"
        },
        {
          "doi": "10.1126/science.1167856",
          "note": "Lincoln & Joyce (2009) Self-sustained replication of an RNA enzyme; Science 323:1229"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-chemistry/b-rna-world-origin-of-life.yaml"
    },
    {
      "id": "b-secondary-metabolites-drug-discovery",
      "title": "Biological secondary metabolites — assembled by modular PKS and NRPS molecular assembly lines — account for ~50% of approved drugs; genome mining of silent biosynthetic gene clusters in soil bacteria represents the largest untapped chemical diversity on Earth and the most promising pipeline for new antibiotic classes.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Approximately 50% of all clinically approved drugs are natural products or their semi-synthetic derivatives (Newman & Cragg 2020). The biosynthetic logic of complex natural products uses modular enzyme assembly lines: polyketide synthases (PKS) build the polyketide scaffold by iterative Claisen cond",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-secondary-metabolites-pksnrps-combinatorial-evolution"
      ],
      "communication_gap": "Natural product chemistry and structural biology communities are distinct from microbial genomics and bioinformatics. Clinical pharmacology and drug discovery industry focuses on synthetic compound libraries for throughput, largely abandoning natural products despite their superior drug-likeness (chirality, macrocycle size, sp3 fraction). Regulatory agencies have slow approval timelines for complex natural product structures, discouraging industry investment even when genome mining identifies promising candidates.\n",
      "translation_table": [
        {
          "field_a_term": "Claisen condensation (organic chemistry)",
          "field_b_term": "PKS chain elongation (KS-catalysed decarboxylative condensation of malonyl-CoA)",
          "note": "Same mechanism used in fatty acid synthesis; PKS modules reuse β-oxidation enzymes in reverse"
        },
        {
          "field_a_term": "assembly-line synthesis (combinatorial chemistry)",
          "field_b_term": "modular PKS/NRPS biosynthesis (each module = one building block)",
          "note": "Combinatorial PKS engineering by module swapping has produced novel analogues of erythromycin"
        },
        {
          "field_a_term": "silent gene cluster (unexpressed secondary metabolism)",
          "field_b_term": "cryptic BGC (biosynthetically competent but not induced under lab conditions)",
          "note": "Genome mining predicts ~90% of microbial chemical diversity is unexpressed in culture"
        },
        {
          "field_a_term": "CRISPR gene activation (CRISPRa)",
          "field_b_term": "BGC awakening — activating silent clusters to produce novel metabolites",
          "note": "dCas9-VP64 fused to promoter of BGC activator gene induces cluster expression without DNA editing"
        },
        {
          "field_a_term": "drug scaffold diversification (medicinal chemistry)",
          "field_b_term": "semi-synthetic modification of natural product core (erythromycin → azithromycin)",
          "note": "Most successful natural product drugs are semi-synthetic; total synthesis is too expensive at scale"
        }
      ],
      "references": [
        {
          "doi": "10.1021/acs.jnatprod.9b01285",
          "note": "Newman & Cragg (2020) J Nat Prod 83:770 — natural products as sources of new drugs over the period 1981-2019"
        },
        {
          "doi": "10.1039/c2np20019h",
          "note": "Keatinge-Clay (2012) Nat Prod Rep 29:1050 — the structures of type I polyketide synthases"
        },
        {
          "doi": "10.1128/SIM.58.3.186-196.2008",
          "note": "Baltz (2008) SIM News 58:186 — renaissance in antibacterial discovery from actinomycetes"
        },
        {
          "doi": "10.1038/s41564-018-0110-1",
          "note": "Hover et al. (2018) Nat Microbiol 3:415 — culture-independent discovery of antibiotics using the iChip"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-chemistry/b-secondary-metabolites-drug-discovery.yaml"
    },
    {
      "id": "b-ant-colony-distributed-computation",
      "title": "Ant colony optimization (ACO) formalizes the pheromone trail mechanism of foraging ants as a distributed probabilistic graph search algorithm that finds near-optimal solutions to NP-hard combinatorial problems",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Foraging ants deposit pheromone tau_ij on edges (i,j) of a complete graph proportional to path quality (1/L_k), and choose edges probabilistically as p_{ij} = tau_ij^alpha * eta_ij^beta / sum(tau_il^alpha * eta_il^beta), reinforcing shorter paths; the resulting pheromone distribution converges to th",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Biologists study foraging behavior as ecology while computer scientists use ACO as a heuristic algorithm; the formal equivalence between stigmergic behavior and parallel probabilistic search is known in computational intelligence but rarely discussed in biological foraging theory or evolutionary ecology.",
      "translation_table": [
        {
          "field_a_term": "pheromone trail strength tau_ij on ant path",
          "field_b_term": "edge weight in probabilistic graph traversal",
          "note": "Pheromone evaporation (tau -> (1-rho)*tau) prevents premature convergence; analogous to temperature in simulated annealing"
        },
        {
          "field_a_term": "stigmergic communication (indirect via environment)",
          "field_b_term": "asynchronous message passing via shared memory (blackboard architecture)",
          "note": "Ants communicate through pheromone field, not directly; equivalent to distributed agents writing/reading shared state"
        },
        {
          "field_a_term": "positive feedback in trail reinforcement",
          "field_b_term": "reinforcement learning Q-value update",
          "note": "ACO is equivalent to model-based RL where pheromone is the Q-table and environment is the TSP graph"
        },
        {
          "field_a_term": "colony-level path optimization",
          "field_b_term": "parallel stochastic search with diversity maintenance via evaporation",
          "note": "Colony is a parallel computation with N agents; diversity maintained by evaporation (anti-exploitation)"
        }
      ],
      "references": [
        {
          "doi": "10.1109/3477.484436",
          "note": "Dorigo et al. (1996) IEEE Trans Syst Man Cybern - ant system: optimization by a colony of cooperating agents"
        },
        {
          "doi": "10.1613/jair.1177",
          "note": "Dorigo & Gambardella (1997) J Artif Intell Res - ant colony system: ACO for TSP"
        },
        {
          "doi": "10.1126/science.1226406",
          "note": "Reid et al. (2011) J Exp Biol - ant colony optimization in biological contexts"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-computer-science/b-ant-colony-distributed-computation.yaml"
    },
    {
      "id": "b-ant-colony-optimization-x-gradient-free-metaheuristics",
      "title": "Ant colony optimization (ACO) constructs stochastic solution builders using pheromone reinforcement proportional to past solution quality — exemplifying population-based, derivative-free combinatorial optimization sharing convergence motifs with cross-entropy method and evolutionary strategies despite distinct biological narratives.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "ACO deposits artificial pheromone trails updated by evaporation and reinforcement — parallel to weighted adaptive sampling in gradient-free optimizers that allocate search budget toward low-cost regions without gradients. Formal proofs remain limited for general combinatorial instances — resembling ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ant-colony-optimization-x-gradient-free-metaheuristics"
      ],
      "communication_gap": "Operations research journals catalog metaheuristic zoo without insect ethology context; entomology papers report trail algorithms without complexity-theoretic references familiar to CS departments — cross-listed surveys remain fewer than independent silos would suggest.\n",
      "translation_table": [
        {
          "field_a_term": "Pheromone concentration τ_ij on graph edges",
          "field_b_term": "Adaptive sampling weight / trust-region bias toward historically strong edges",
          "note": "Both encode memory of exploration without derivative structure on discrete landscapes."
        },
        {
          "field_a_term": "Heuristic desirability η_ij (visibility)",
          "field_b_term": "Problem-specific initialization bias analogous to prior shaping in black-box search",
          "note": "Engineers tune η from domain physics similar to biologists encoding distance cues."
        },
        {
          "field_a_term": "Ant stochastic path construction",
          "field_b_term": "Parallel Monte Carlo walkers with adaptive proposal distributions",
          "note": "Shared population parallelism exploited on HPC clusters for routing and scheduling competitions."
        }
      ],
      "references": [
        {
          "doi": "10.1109/3475.585892",
          "note": "Dorigo, Maniezzo & Colorni (1996) IEEE Trans. Syst. Man Cybern. — ant system foundational paper"
        },
        {
          "doi": "10.1007/s11721-004-0071-0",
          "note": "Dorigo & Stützle (2004) Swarm Intelligence — ACO textbook lineage overview"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-computer-science/b-ant-colony-optimization-x-gradient-free-metaheuristics.yaml"
    },
    {
      "id": "b-ant-colony-stigmergy-aco",
      "title": "Insect swarm stigmergy — indirect coordination through environment-mediated signals such as pheromone trails — is the biological substrate from which ant colony optimisation (ACO) algorithms are derived, and the mathematical analysis of ACO convergence directly predicts which biological swarm behaviors are evolutionarily stable.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Ant colonies solve the traveling salesman problem without central control: foragers deposit pheromone on paths, and shorter paths accumulate pheromone faster (more round trips per unit time), positively reinforcing the optimal route. This stigmergic mechanism — indirect coordination through environm",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "",
      "translation_table": [
        {
          "field_a_term": "pheromone concentration tau_{ij}",
          "field_b_term": "ACO heuristic desirability / Q-table value",
          "note": "Encodes collective memory of path quality; decays with time (evaporation)"
        },
        {
          "field_a_term": "evaporation rate rho",
          "field_b_term": "learning rate decay / temporal discount factor",
          "note": "Sets the memory horizon; biological optimum at rho ~ 0.1-0.3 for most tasks"
        },
        {
          "field_a_term": "positive feedback (short path gets more pheromone faster)",
          "field_b_term": "reward amplification in reinforcement learning",
          "note": "Both create attractor basins around good solutions; both can trap in local optima"
        },
        {
          "field_a_term": "probabilistic path selection p_{ij} ~ tau^alpha * eta^beta",
          "field_b_term": "softmax policy in RL with temperature",
          "note": "The alpha/beta parameters set exploration-exploitation balance — equivalent to RL temperature"
        }
      ],
      "references": [
        {
          "doi": "10.1109/4235.996017",
          "note": "Dorigo & Di Caro (1999) — The ant colony optimization metaheuristic"
        },
        {
          "doi": "10.1007/s11721-007-0004-y",
          "note": "Dorigo & Birattari (2007) — ant colony optimization"
        },
        {
          "doi": "10.1098/rsif.2012.0878",
          "note": "Deneubourg et al. — self-organization and optimal foraging in ants"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-computer-science/b-ant-colony-stigmergy-aco.yaml"
    },
    {
      "id": "b-crispr-programmable-genome-editing",
      "title": "CRISPR-Cas9 ↔ biological search-and-replace algorithm — programmable genome editing as string computation",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "CRISPR-Cas9 is a programmable biological search-and-replace algorithm operating on the genome as a character string. The guide RNA (gRNA, ~20 nucleotides) is the search pattern; Cas9 protein is the endonuclease (cut function); the PAM sequence (NGG for SpCas9) is the address delimiter that restricts",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-genome-as-distributed-write-once-storage"
      ],
      "communication_gap": "Molecular biologists and computer scientists both have deep relevant expertise but collaborate rarely on CRISPR design. Bioinformatics bridges the gap computationally (off-target prediction tools like Cas-OFFinder, CRISPOR use string-matching algorithms), but the formal computer science framing — complexity analysis, information-theoretic limits on specificity — is rarely applied. Most CRISPR engineering is empirical rather than theory-driven.\n",
      "translation_table": [
        {
          "field_a_term": "guide RNA sequence (20 nt)",
          "field_b_term": "search pattern / query string",
          "note": "The gRNA defines which genomic address is targeted; changing it reprograms the cut site"
        },
        {
          "field_a_term": "Cas9 endonuclease",
          "field_b_term": "cut / write function",
          "note": "Cas9 executes the edit once the gRNA has located the match"
        },
        {
          "field_a_term": "PAM sequence (NGG)",
          "field_b_term": "address delimiter / restriction site",
          "note": "PAM must immediately follow the protospacer; limits addressable sites to ~1 in 8 bp"
        },
        {
          "field_a_term": "base-pair complementarity (Watson-Crick)",
          "field_b_term": "exact string matching",
          "note": "The thermodynamic energy of hybridisation is the matching score"
        },
        {
          "field_a_term": "off-target editing (mismatches tolerated)",
          "field_b_term": "approximate string matching (fuzzy search)",
          "note": "Cas9 tolerates up to ~5 mismatches — analogous to edit distance tolerance in BLAST"
        },
        {
          "field_a_term": "multiplexed editing (multiple gRNAs)",
          "field_b_term": "parallel multi-pattern search (Aho-Corasick)",
          "note": "Multiple gRNAs can be co-expressed for simultaneous edits at independent loci"
        },
        {
          "field_a_term": "prime editing pegRNA (spacer + RT template)",
          "field_b_term": "find-and-replace with specified replacement string",
          "note": "pegRNA encodes both the search sequence and the replacement — full string rewrite"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1225829",
          "note": "Doudna & Charpentier (2012) Science 337:816 — CRISPR-Cas9 as programmable endonuclease"
        },
        {
          "doi": "10.1126/science.1231143",
          "note": "Cong et al. (2013) Science 339:819 — genome editing in eukaryotic cells"
        },
        {
          "doi": "10.1016/j.cell.2014.05.010",
          "note": "Hsu et al. (2014) Cell 157:1262 — off-target effects and specificity"
        },
        {
          "doi": "10.1038/s41586-019-1711-4",
          "note": "Anzalone et al. (2019) Nature 576:149 — prime editing (find-and-replace)"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-computer-science/b-crispr-programmable-genome-editing.yaml"
    },
    {
      "id": "b-dna-origami-scaffold-routing-x-staged-compilation-analogy",
      "title": "DNA origami scaffold routing and staged compilation share a constrained-assembly logic: a global design is decomposed into local binding or dependency steps whose ordering controls yield, error propagation, and debuggability, though the compiler analogy is explicitly speculative.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The bridge is a labeled metaphor for design practice, not a mechanistic equivalence. Scaffold path constraints, staple crossovers, and annealing schedules can be described like dependency graphs and staged compilation passes, suggesting measurable software-style complexity metrics for origami yield ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-scaffold-routing-constraint-metrics-predict-origami-yield"
      ],
      "communication_gap": "DNA nanotechnology uses wet-lab yield and microscopy language, while compiler engineers reason about dependency graphs, static checks, and staged lowering.\n",
      "translation_table": [
        {
          "field_a_term": "scaffold path and staple constraints",
          "field_b_term": "dependency graph and intermediate representation",
          "note": "Both expose local constraints in a global assembly."
        },
        {
          "field_a_term": "annealing schedule",
          "field_b_term": "staged compilation or pass ordering",
          "note": "Ordering controls failure modes."
        },
        {
          "field_a_term": "misfolded intermediate",
          "field_b_term": "failed build artifact or unresolved dependency",
          "note": "The analogy is pedagogical and must be tested."
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature04586",
          "note": "Rothemund (2006) DNA origami by scaffolded folding."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-computer-science/b-dna-origami-scaffold-routing-x-staged-compilation-analogy.yaml"
    },
    {
      "id": "b-flocking-reynolds-boids-alignment",
      "title": "Animal flocking emerges from three local interaction rules - separation, alignment, cohesion - first encoded by Reynolds' boids algorithm and subsequently formalised in the Vicsek model as a phase transition in collective alignment, bridging biological collective behavior, computer graphics, and statistical physics of active matter.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Reynolds (1987) showed that realistic flocking arises from three steering behaviours: avoid crowding (separation), steer toward average heading (alignment), steer toward average position (cohesion). The Vicsek model (1995) stripped this to the alignment rule alone and showed it undergoes a phase tra",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Computer graphics researchers developed boids for animation without connecting to statistical physics phase transitions; biologists studying collective behaviour rarely formalise universality class; statistical physicists studying active matter have different notation and emphasis than biologists. Three-way disciplinary gap requires simultaneous expertise in simulation, animal tracking, and statistical physics.\n",
      "translation_table": [
        {
          "field_a_term": "alignment rule (computer science / Reynolds boids)",
          "field_b_term": "velocity matching with neighbours in animal flocks (biology)",
          "note": "Birds match velocity with k ~ 7 topological neighbours; fish with metric radius"
        },
        {
          "field_a_term": "Vicsek order parameter phi = |mean velocity| / speed (statistical physics)",
          "field_b_term": "flock polarisation / collective order (biology)",
          "note": "phi = 1 (perfect alignment) to 0 (disordered); measurable by tracking individual trajectories"
        },
        {
          "field_a_term": "noise threshold eta_c / phase transition (statistical physics)",
          "field_b_term": "flock breakup at high density perturbation or predator attack (biology)",
          "note": "Predator attacks drive the system through eta_c transiently; flock reforms below threshold"
        },
        {
          "field_a_term": "topological vs. metric interaction range (computer science)",
          "field_b_term": "topological neighbours in starling murmurations (biology)",
          "note": "Topological interactions (k nearest) change the universality class from Vicsek metric model"
        }
      ],
      "references": [
        {
          "doi": "10.1145/37401.37406",
          "note": "Reynolds (1987) - flocks, herds, and schools; a distributed behavioral model"
        },
        {
          "doi": "10.1103/PhysRevLett.75.1226",
          "note": "Vicsek et al. (1995) - novel type of phase transitions in a system of self-driven particles"
        },
        {
          "doi": "10.1073/pnas.0711437105",
          "note": "Ballerini et al. (2008) - interaction ruling animal collective behavior depends on topological rather than metric distance"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-computer-science/b-flocking-reynolds-boids-alignment.yaml"
    },
    {
      "id": "b-gene-regulatory-networks-boolean-logic",
      "title": "Kauffman's Boolean network model maps gene regulatory circuits onto digital logic gates, predicting that cell types correspond to dynamical attractors and that the number of cell types scales as √N_genes for critical K=2 networks — a cross-domain insight connecting combinatorial logic theory to developmental cell biology.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Boolean network models (Kauffman 1969): genes are binary nodes (on/off), each receiving K regulatory inputs and computing a Boolean function of those inputs. The entire N-gene network is a finite deterministic automaton with 2^N states; from any initial state, the trajectory leads to a cycle (attrac",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-critical-boolean-network-cell-type-count"
      ],
      "communication_gap": "Kauffman published in Journal of Theoretical Biology (biology journal); the formal theory of Boolean satisfiability and phase transitions was developed in computer science (STOC, FOCS conferences) and statistical physics simultaneously but without citation exchange. Boolean network theory in biology was largely descriptive; computer scientists who could have provided rigorous attractor analysis tools rarely engaged with biological applications. The ENCODE project reframed the regulatory genome but did not connect to Boolean network theory. Biologists trained in molecular biology rarely learn about Boolean satisfiability, and computer scientists rarely encounter Kauffman's model.\n",
      "translation_table": [
        {
          "field_a_term": "gene node (on/off expression state)",
          "field_b_term": "Boolean variable (0/1 logic gate input)"
        },
        {
          "field_a_term": "transcription factor regulatory function",
          "field_b_term": "Boolean logic gate (AND, OR, NOT, XOR function of inputs)"
        },
        {
          "field_a_term": "cell type (stable gene expression pattern)",
          "field_b_term": "attractor of the Boolean automaton (fixed point or limit cycle)"
        },
        {
          "field_a_term": "cell differentiation (transition between cell types)",
          "field_b_term": "perturbation-driven transition between attractor basins"
        },
        {
          "field_a_term": "developmental canalization (Waddington landscape)",
          "field_b_term": "basin of attraction (size ∝ canalization strength)"
        },
        {
          "field_a_term": "oncogenic transformation (aberrant cell state)",
          "field_b_term": "spurious attractor (not in normal attractor set)"
        },
        {
          "field_a_term": "K connectivity (average regulatory inputs per gene)",
          "field_b_term": "fan-in of logic gate (inputs per gate in circuit)"
        },
        {
          "field_a_term": "critical K=2 (edge of chaos)",
          "field_b_term": "critical Boolean satisfiability threshold (phase transition at K=2 per variable)"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0022-5193(69)90015-0",
          "note": "Kauffman (1969) J Theor Biol 22:437 — Boolean network model of gene regulation"
        },
        {
          "doi": "10.1209/0295-5075/1/2/001",
          "note": "Derrida & Pomeau (1986) Europhys Lett 1:45 — damage spreading, phase transition in RBNs"
        },
        {
          "doi": "10.1103/RevModPhys.74.47",
          "note": "Albert & Barabási (2002) Rev Mod Phys 74:47 — statistical mechanics of complex networks"
        },
        {
          "doi": "10.1038/nature11232",
          "note": "ENCODE Project Consortium (2012) Nature 489:57 — 85% of genome functional"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-computer-science/b-gene-regulatory-networks-boolean-logic.yaml"
    },
    {
      "id": "b-kauffman-boolean-x-gene-network-attractor-stability",
      "title": "Kauffman random Boolean networks exhibit ordered, chaotic, and critical regimes depending on connectivity K and bias p — mapping conceptually onto discrete models of gene regulation where attractors correspond to cell types / stable expression patterns and stability margins mirror canalization against genetic noise.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "In RBNs each gene updates as a Boolean function of K regulators; for random ensembles the average influence determines whether dynamics freeze into attractors (ordered), wander ergodically (chaotic), or sit near criticality (edge of chaos hypothesis). Gene regulatory networks are not literal Boolean",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-kauffman-boolean-x-gene-network-attractor-stability"
      ],
      "communication_gap": "Classical RBN papers publish in physics journals; molecular genetics emphasizes empirical circuits (yeast, fly) with sparse appeal to Boolean universality classes without quantitative parameter matching.\n",
      "translation_table": [
        {
          "field_a_term": "Boolean update functions (NK/RBN)",
          "field_b_term": "Combinatorial transcription-factor binding enabling Hill-function sharp thresholds",
          "note": "Logical approximation coarse-grains continuous transcription into ON/OFF dynamics on slow timescales."
        },
        {
          "field_a_term": "Attractors of discrete dynamics",
          "field_b_term": "Stable phenotypes / expression steady states in GRNs",
          "note": "Count and basin sizes hypothesized to relate to cell-type repertoire diversity."
        },
        {
          "field_a_term": "Critical K where phase transition occurs",
          "field_b_term": "Regulatory connectivity permitting robust development vs chaotic differentiation",
          "note": "Experimental estimation of effective K remains challenging for mammalian networks."
        }
      ],
      "references": [
        {
          "doi": "10.1038/342467a0",
          "note": "Kauffman (1989) Nature — origins of order in randomly wired networks"
        },
        {
          "doi": "10.1016/S0303-2647(02)00079-0",
          "note": "Bornholdt (2005) Boolean network models review trajectory"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-computer-science/b-kauffman-boolean-x-gene-network-attractor-stability.yaml"
    },
    {
      "id": "b-regulatory-networks-boolean-sat",
      "title": "Gene regulatory network behavior under combinatorial transcription factor inputs maps onto Boolean satisfiability (SAT), making the computation of network steady states NP-complete in general and connecting systems biology to theoretical computer science.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Stuart Kauffman's Boolean network model assigns each gene a Boolean function of its regulators; finding the attractors (stable gene expression states) of a Boolean regulatory network with N genes and K inputs per gene is equivalent to solving a system of Boolean equations—a form of SAT—with attracto",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-kauffman-network-criticality-cell-types"
      ],
      "communication_gap": "Systems biologists build regulatory network models using differential equations or Boolean logic while computer scientists develop SAT algorithms; the NP-hardness of attractor finding in Boolean networks has been known since the 2000s but modern SAT-solver technology has not been systematically applied to biological network attractor enumeration at genome scale.\n",
      "translation_table": [
        {
          "field_a_term": "gene regulatory network attractor (systems biology)",
          "field_b_term": "satisfying assignment in Boolean SAT (computer science)",
          "note": "A stable cell state corresponds to a fixed point of the Boolean dynamics, i.e., a SAT solution"
        },
        {
          "field_a_term": "Boolean update function per gene f_i(x_1,...,x_K) (systems biology)",
          "field_b_term": "clause in CNF formula (computer science)",
          "note": "Each gene's regulation logic is a Boolean function that must be simultaneously satisfied"
        },
        {
          "field_a_term": "cell fate transition (systems biology)",
          "field_b_term": "movement between SAT solutions in the Boolean space (computer science)",
          "note": "Waddington landscape attractors are separated by unsatisfied constraint regions"
        },
        {
          "field_a_term": "feedback loop in regulatory network (systems biology)",
          "field_b_term": "clause cycle / unit propagation structure (SAT)",
          "note": "Positive feedback loops stabilize attractors; negative feedback creates oscillatory solutions"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0022-5193(69)90015-0",
          "note": "Kauffman (1969) - metabolic stability and epigenesis in randomly constructed genetic nets"
        },
        {
          "doi": "10.1007/978-3-540-25974-4_17",
          "note": "Akutsu et al. (2000) - NP-hardness of finding attractors in Boolean networks"
        },
        {
          "doi": "10.1371/journal.pcbi.1000702",
          "note": "Saez-Rodriguez et al. (2011) - logic-based models for signaling network analysis"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-computer-science/b-regulatory-networks-boolean-sat.yaml"
    },
    {
      "id": "b-rna-secondary-structure-x-planar-graphs",
      "title": "RNA secondary structure prediction treats base pairing as a non-crossing (planar) graph optimization problem, linking molecular biology to dynamic programming on trees and planar matchings.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Nussinov–Jacobson and related DP algorithms maximize weighted base pairings subject to non-crossing constraints, yielding a planar graph representation of secondary structure. More general structures with pseudoknots require non-planar graphs and raise computational complexity. The bridge connec",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-nussinov-energy-approximates-planar-graph-parsimony"
      ],
      "communication_gap": "Biologists discuss dot-bracket notation; algorithms researchers discuss parse forests. Tooling hides the graph structure, so errors in pseudoknot handling propagate silently in design pipelines.",
      "translation_table": [
        {
          "field_a_term": "non-crossing arcs",
          "field_b_term": "planar matching / outerplanar graph"
        },
        {
          "field_a_term": "recurrence on subintervals [i,j]",
          "field_b_term": "CYK-like decomposition for RNA folding"
        },
        {
          "field_a_term": "pseudoknot",
          "field_b_term": "non-planar crossing base pairs requiring higher complexity"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.77.11.6309",
          "note": "Nussinov & Jacobson (1980) — fast algorithm for predicting secondary structure"
        },
        {
          "doi": "10.1093/nar/9.1.133",
          "note": "Zuker & Stiegler (1981) — energy-based folding with DP (thermodynamic refinement bridge)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-computer-science/b-rna-secondary-structure-x-planar-graphs.yaml"
    },
    {
      "id": "b-signal-transduction-boolean-network-attractors",
      "title": "Intracellular signal transduction networks behave as Boolean networks whose attractors correspond to stable cell fates, mapping cell-state decisions onto the computational theory of finite-state automata and attractor basins.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A signal transduction network can be abstracted as a Boolean network: each protein is a node (active=1, inactive=0) whose state is updated by a logical rule derived from biochemical interactions. Fixed-point attractors correspond to stable cell states (differentiated phenotypes, apoptosis, prolifera",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-boolean-attractor-cell-fate-landscape"
      ],
      "communication_gap": "Cell biologists studying signal transduction and computer scientists studying Boolean networks publish in largely separate venues; the Kauffman NK model tradition bridged them, but most experimental biologists are unfamiliar with attractor analysis tools, and most computer scientists are unaware of the biological validation literature.\n",
      "translation_table": [
        {
          "field_a_term": "phosphorylation state of signaling protein (cell biology)",
          "field_b_term": "binary node state in Boolean network (computer science)",
          "note": "Active (phosphorylated) = 1; inactive = 0; threshold-linear kinetics justify binarization"
        },
        {
          "field_a_term": "stable differentiated cell state (cell biology)",
          "field_b_term": "fixed-point attractor of Boolean network (computer science)",
          "note": "Waddington's epigenetic landscape valleys correspond to attractor basins"
        },
        {
          "field_a_term": "signal integration by kinase cascade (cell biology)",
          "field_b_term": "Boolean function / logic gate (computer science)",
          "note": "AND/OR/NOT logic gates capture cooperative and competitive signaling interactions"
        },
        {
          "field_a_term": "cell fate decision (proliferate vs. differentiate vs. apoptose) (cell biology)",
          "field_b_term": "attractor reached from initial condition (computer science)",
          "note": "The attractor basin boundary is the decision boundary in gene-expression state space"
        }
      ],
      "references": [
        {
          "doi": "10.1093/bioinformatics/btq504",
          "note": "Chaouiya (2007) — Petri nets and Boolean models for signal transduction"
        },
        {
          "doi": "10.1016/j.semcdb.2009.07.003",
          "note": "Huang, Ernberg & Kauffman (2009) — cancer attractors: a systems view of tumors from a gene network dynamics perspective; Semin Cell Dev Biol 20:869"
        },
        {
          "doi": "10.1371/journal.pcbi.1000292",
          "note": "Saez-Rodriguez et al. (2009) — Boolean network model of T cell receptor signaling"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-computer-science/b-signal-transduction-boolean-network-attractors.yaml"
    },
    {
      "id": "b-transformer-attention-x-protein-language-model-fitness-prediction",
      "title": "Transformer attention mechanisms connect sequence modeling advances with protein fitness prediction pipelines.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Attention-based sequence modeling can encode long-range residue dependencies relevant to protein fitness landscapes.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-attention-regularized-protein-language-models-improve-fitness-ranking"
      ],
      "communication_gap": "Protein engineering focuses on experimental throughput, while transformer research often reports language-model objectives disconnected from wet-lab utility.",
      "translation_table": [],
      "references": [
        {
          "arxiv": "1706.03762",
          "note": "Attention Is All You Need."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/biology-computer-science/b-transformer-attention-x-protein-language-model-fitness-prediction.yaml"
    },
    {
      "id": "b-bacterial-chemotaxis-x-gradient-descent",
      "title": "Bacterial chemotaxis x Gradient descent - run-and-tumble as stochastic optimization\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "E. coli chemotaxis (biased random walk toward chemical attractants via run-and-tumble motion) implements stochastic gradient ascent on the chemoattractant concentration field; the methylation-based memory system is an adaptive temporal integrator that computes the temporal gradient of concentration ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Microbiologists studying chemotaxis signaling (Adler 1966, Berg & Brown 1972) and optimization researchers developing gradient descent (Robbins & Monro 1951) built separate frameworks for directed search; the algorithmic interpretation of chemotaxis as gradient ascent was formalized by Mello & Tu (2003) and connects to modern machine learning through the exact parallel between methylation adaptation and momentum-based optimizers — a connection rarely exploited in algorithm design.\n",
      "translation_table": [
        {
          "field_a_term": "tumbling probability modulation by CheY phosphorylation (microbiology)",
          "field_b_term": "step size / learning rate modulation in stochastic gradient ascent (optimization)",
          "note": "High [attractant gradient] decreases tumbling frequency, increasing run length — equivalent to increasing step size in favorable direction"
        },
        {
          "field_a_term": "methylation state of chemoreceptors (memory system) (biochemistry)",
          "field_b_term": "momentum term / exponential moving average of gradients (Adam optimizer)",
          "note": "Receptor methylation integrates recent concentration history, implementing a low-pass filter equivalent to momentum in gradient descent"
        },
        {
          "field_a_term": "run-and-tumble random walk (bacterial motility)",
          "field_b_term": "stochastic gradient descent with random noise (optimization)",
          "note": "Tumbling introduces randomness analogous to SGD noise; the bias in run duration creates net drift up the gradient"
        },
        {
          "field_a_term": "adaptation to background concentration (receptor adaptation) (biochemistry)",
          "field_b_term": "adaptive learning rate / gradient normalization (optimization)",
          "note": "Perfect adaptation normalizes the signal to the background, equivalent to gradient normalization (RMSprop) in adaptive optimizers"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nrm1503",
          "note": "Wadhams & Armitage (2004) - Making sense of it all: bacterial chemotaxis; Nature Rev Mol Cell Biol 5:1024"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-cs/b-bacterial-chemotaxis-x-gradient-descent.yaml"
    },
    {
      "id": "b-biomechanics-x-soft-robotics",
      "title": "Biomechanics x Soft Robotics — compliant mechanisms as muscle-tendon analogs\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Biological muscle-tendon units (series elastic actuators) store and release elastic energy during locomotion, reducing metabolic cost below that predicted by rigid-body models; soft robotic actuators (pneumatic artificial muscles, dielectric elastomers) replicate this compliance-energy storage trade",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Biomechanics developed muscle-tendon models (Hill 1938, Roberts et al. 1997) independently of robotics which used rigid-body dynamics; the transfer of elastic energy storage principles to robotics was driven by the running robots community (Raibert 1986, Full & Koditschek 1999) but soft robotics adopted compliant mechanism theory only in the 2010s when pneumatic soft actuators demonstrated octopus-inspired locomotion.\n",
      "translation_table": [
        {
          "field_a_term": "Tendon compliance (series elastic element)",
          "field_b_term": "Compliant mechanism spring element in robotics",
          "note": "Achilles tendon stores ~35% of stride energy as elastic strain energy, releasing it in the push-off phase; series elastic actuators (SEA) in robotics replicate this by coupling a spring between motor and output — enabling passive energy storage and force control.\n"
        },
        {
          "field_a_term": "Metabolic cost reduction (elastic energy recycling)",
          "field_b_term": "Efficiency gain from compliance in locomotion",
          "note": "The spring-mass model of running shows that elastic energy storage reduces metabolic cost by up to 50% vs. rigid-body models; robotic bipeds with compliant legs (ATRIAS, Cassie) achieve similar efficiency by tuning spring stiffness to match natural frequency of locomotion.\n"
        },
        {
          "field_a_term": "Muscle force-length-velocity relationship",
          "field_b_term": "Actuator characteristics in soft robotic design",
          "note": "The Hill muscle model (force = F(L)·F(v)·F_max) characterizes muscle actuator properties; pneumatic McKibben actuators and dielectric elastomers have analogous force-displacement-velocity characteristics that can be matched to Hill model parameters.\n"
        },
        {
          "field_a_term": "Distributed compliance (invertebrate locomotion)",
          "field_b_term": "Soft continuum robot design",
          "note": "Octopus arms and caterpillar locomotion use distributed compliance throughout the body rather than concentrated joints; soft continuum robots replicate this architecture using elastomeric materials, enabling compliant interaction with unstructured environments.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature14543",
          "note": "Rus & Tolley (2015) — Design, fabrication and control of soft robots; Nature 521:467"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-cs/b-biomechanics-x-soft-robotics.yaml"
    },
    {
      "id": "b-circadian-clock-x-feedback-oscillator",
      "title": "Circadian clock ↔ Feedback oscillator — TTFL as relaxation oscillator",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The transcription-translation feedback loop (TTFL) of circadian clocks (CLOCK-BMAL1/PER-CRY) is a biological relaxation oscillator whose period is set by protein degradation time constants; it is mathematically equivalent to a van der Pol oscillator with negative feedback delay, enabling entrainment",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-circadian-clock-x-feedback-oscillator"
      ],
      "communication_gap": "Circadian biology developed as a molecular genetics field; the TTFL was discovered by genetic screens (Hall, Rosbash, Young — Nobel 2017) without reference to oscillator circuit theory. Control engineers studying biological oscillators (Goodwin 1965, Goldbeter 1995) worked in applied mathematics journals rarely read by circadian biologists, who focused on identifying molecular components rather than circuit dynamics.",
      "translation_table": [
        {
          "field_a_term": "CLOCK-BMAL1 transcriptional activator complex (circadian biology)",
          "field_b_term": "positive feedback arm of oscillator circuit (control engineering)",
          "note": "CLOCK-BMAL1 activates PER/CRY expression — the positive phase of the oscillator"
        },
        {
          "field_a_term": "PER-CRY repressor complex (circadian biology)",
          "field_b_term": "negative feedback element with time delay τ (control engineering)",
          "note": "PER-CRY represses CLOCK-BMAL1; delay τ ≈ 6-8 hours (protein synthesis/degradation)"
        },
        {
          "field_a_term": "CKIε/δ phosphorylation rate of PER protein (circadian biology)",
          "field_b_term": "nonlinear damping coefficient μ in van der Pol oscillator",
          "note": "CKI phosphorylation rate sets period; mutations change period from 20 to 28 hours"
        },
        {
          "field_a_term": "light pulse entrainment (via CRY degradation pathway)",
          "field_b_term": "phase resetting curve (PRC) input to forced oscillator",
          "note": "Light input shifts PRC; jet lag is transient due to oscillator damping"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nrg.2016.150",
          "note": "Takahashi (2017) — transcriptional architecture of the mammalian circadian clock; Nature Rev Genet"
        },
        {
          "doi": "10.1016/0065-2571(65)90067-1",
          "note": "Goodwin (1965) — oscillatory behaviour in enzymatic control processes; Adv Enzyme Regul 3:425"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-cs/b-circadian-clock-x-feedback-oscillator.yaml"
    },
    {
      "id": "b-crispr-base-editing-x-error-correction",
      "title": "CRISPR Base Editing x Error Correction - adenine base editor as bit-flip corrector\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Adenine base editors (ABEs) convert A-T to G-C base pairs without double-strand breaks, implementing a precise one-bit correction in the genomic information channel; the specificity window (protospacer positions 4-8) is analogous to a convolutional code's constraint length, and off-target edits are ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Information theory and molecular genetics have been connected conceptually (Gamow's coding problem, Shannon's channel model) but the specific analogy between base editing and error correction operations is underexplored; it opens the possibility of designing guide RNA sequences to minimize off-target errors using coding theory principles.\n",
      "translation_table": [
        {
          "field_a_term": "Genomic DNA sequence (4-letter alphabet, ~3 billion bp)",
          "field_b_term": "Encoded message in a noisy information channel",
          "note": "The genome is a high-capacity information storage medium; base substitutions (mutations, sequencing errors) are channel noise; base editing is targeted error correction that changes one specific symbol without erasing context.\n"
        },
        {
          "field_a_term": "Adenine base editor (ABE7.10, ABE8e)",
          "field_b_term": "Bit-flip error corrector (single-bit decoder)",
          "note": "ABE converts A (0) to G (1) at a target site with >99% efficiency; this is functionally equivalent to a targeted bit-flip error correction step that corrects a known single-site error without affecting surrounding bits.\n"
        },
        {
          "field_a_term": "Protospacer activity window (positions 4-8, counting from PAM)",
          "field_b_term": "Constraint length of convolutional decoder",
          "note": "The editing window is an intrinsic property of the base editor deaminase domain; it limits which positions can be corrected, analogous to the constraint length of a convolutional code that determines which bits can be independently decoded.\n"
        },
        {
          "field_a_term": "Off-target editing (unintended sites with partial guide match)",
          "field_b_term": "Decoder symbol error rate (residual errors after decoding)",
          "note": "Off-target edits occur at genomic sites with sequence similarity to the guide RNA target; the specificity (on-target / off-target ratio) measures the effective error rate of the base editing decoder.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature24644",
          "note": "Gaudelli et al. (2017) - programmable base editing of A-T to G-C in genomic DNA without DNA cleavage; Nature 551:464"
        },
        {
          "doi": "10.1038/nature17946",
          "note": "Komor et al. (2016) - programmable editing of a target base in genomic DNA without double-stranded DNA cleavage; Nature 533:420"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-cs/b-crispr-base-editing-x-error-correction.yaml"
    },
    {
      "id": "b-crispr-x-search-and-replace",
      "title": "CRISPR-Cas9 x String search algorithms — guide RNA as regex pattern matching\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "CRISPR-Cas9 genome editing performs exact string matching (PAM-adjacent target search) and substitution (cut-and-repair) on a 3-billion-character string (the human genome); guide RNA specificity follows the same mismatched-seed-region rules as approximate string matching algorithms, enabling enginee",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Molecular biologists developing CRISPR tools and computer scientists developing sequence alignment algorithms use the same mathematical framework (approximate string matching, edit distance) but rarely cite each other; off-target prediction tools (Cas-OFFinder, CRISPOR) implement bioinformatics algorithms but are not designed with formal string matching complexity bounds in mind.\n",
      "translation_table": [
        {
          "field_a_term": "guide RNA (gRNA) 20-nt spacer (molecular biology)",
          "field_b_term": "search pattern / query string (computer science)",
          "note": "The 20-nt gRNA is the query; the genome is the database; PAM (NGG) is the anchor that enables rapid scanning"
        },
        {
          "field_a_term": "PAM recognition site (CRISPR biology)",
          "field_b_term": "mandatory suffix constraint in string matching (CS)",
          "note": "Cas9 first searches for PAM sites, then checks gRNA complementarity — analogous to anchored approximate string matching with suffix filter"
        },
        {
          "field_a_term": "seed region mismatch tolerance (CRISPR)",
          "field_b_term": "edit distance in approximate string matching (CS)",
          "note": "Mismatches in the 12-nt PAM-proximal seed region strongly reduce Cas9 cleavage; this mirrors the higher penalty for mismatches near query anchors in BWT-based aligners"
        },
        {
          "field_a_term": "homology-directed repair (HDR) template (biology)",
          "field_b_term": "replacement string in find-and-replace (CS)",
          "note": "The donor DNA template specifies the replacement sequence; HDR is the biological equivalent of regex substitution with a specified replacement"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1225829",
          "note": "Jinek et al. (2012) - A programmable dual-RNA-guided DNA endonuclease in adaptive bacterial immunity; Science 337:816"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-cs/b-crispr-x-search-and-replace.yaml"
    },
    {
      "id": "b-gene-expression-noise-x-information-theory",
      "title": "Gene Expression Noise x Information Theory - transcriptional channel capacity\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Gene regulatory networks face a fundamental channel capacity limit: the maximum mutual information between transcription factor concentration (input) and target gene expression (output) is bounded by the noise in the system; cells near this limit implement near-optimal regulatory strategies predicte",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Information-theoretic analysis of gene regulation was pioneered by Tkacik and Walczak around 2008 but has not been widely adopted by molecular biologists who typically analyze single gene-pair interactions rather than information flows across regulatory networks.\n",
      "translation_table": [
        {
          "field_a_term": "Transcription factor concentration (input signal)",
          "field_b_term": "Channel input X",
          "note": "The concentration of an activating or repressing transcription factor is the input variable; its distribution (set by upstream signaling) determines the prior over the channel input.\n"
        },
        {
          "field_a_term": "Target gene mRNA/protein level (output)",
          "field_b_term": "Channel output Y",
          "note": "The resulting expression level of the regulated gene is the noisy channel output; intrinsic noise (transcriptional bursting) and extrinsic noise (cell-to-cell variation in TF copy number) together define the channel noise.\n"
        },
        {
          "field_a_term": "Cell-to-cell expression variability (noise, eta^2)",
          "field_b_term": "Channel noise power",
          "note": "The Fano factor and coefficient of variation of expression across cells set the effective SNR of the regulatory channel; higher noise compresses the capacity below the noiseless log2(n_states) bound.\n"
        },
        {
          "field_a_term": "Optimal regulatory response (Hill function shape)",
          "field_b_term": "Channel capacity-achieving input distribution",
          "note": "The input TF distribution that maximizes mutual information matches the experimentally observed TF concentration distribution in bacteria and yeast, suggesting evolution has tuned regulatory strategies toward capacity.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.0806077105",
          "note": "Tkacik, Callan & Bialek (2008) - information flow and optimization in transcriptional regulation; PNAS 105:12265"
        },
        {
          "doi": "10.1038/nature09326",
          "note": "Eldar & Elowitz (2010) - functional roles for noise in genetic circuits; Nature 467:167"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-cs/b-gene-expression-noise-x-information-theory.yaml"
    },
    {
      "id": "b-gene-regulatory-network-x-boolean-circuit",
      "title": "Gene regulatory networks ↔ Boolean circuits — transcription factor logic as AND/OR gates",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Transcription factor combinatorics implement Boolean logic: cooperative binding is AND, competitive binding is NOT, and OR gates arise from redundant enhancers; Kauffman's NK random Boolean network model shows that developmental gene networks operate near the critical (chaotic-to-ordered) phase tran",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-gene-regulatory-network-x-boolean-circuit"
      ],
      "communication_gap": "Boolean network models of gene regulation (Kauffman 1969) were developed in theoretical biology and ignored by molecular geneticists for decades, who viewed abstract Boolean models as over-simplified. Molecular biologists studying transcription factor logic in Drosophila (Levine, Mann, Small) developed enhancer logic rules empirically without reference to Boolean circuit theory. The synthesis required systems biology (Davidson 2001) to formalise gene regulatory network logic diagrams.",
      "translation_table": [
        {
          "field_a_term": "cooperative transcription factor binding (both TFs required for activation)",
          "field_b_term": "Boolean AND gate (output 1 iff both inputs 1)",
          "note": "Synergistic activation requires cofactor TF binding within same enhancer module"
        },
        {
          "field_a_term": "competitive/repressive TF binding (repressor blocks activator)",
          "field_b_term": "Boolean NOT/NAND gate (repressor inverts activator logic)",
          "note": "Repressor-activator competition implements NOT; combined with AND gives NAND — universal gate"
        },
        {
          "field_a_term": "shadow enhancer (redundant enhancer elements for same target gene)",
          "field_b_term": "Boolean OR gate (output 1 if any input is 1)",
          "note": "Redundant enhancers are OR logic gates; increase robustness to enhancer mutation"
        },
        {
          "field_a_term": "NK random Boolean network (N genes, K inputs per gene)",
          "field_b_term": "random Boolean circuit of N gates with in-degree K",
          "note": "Kauffman's NK model: ordered phase (K < 2), critical (K = 2), chaotic (K > 2)"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0022-5193(69)90015-0",
          "note": "Kauffman (1969) — metabolic stability and epigenesis in randomly constructed genetic nets; J Theor Biol 22:437"
        },
        {
          "doi": "10.1126/science.1069883",
          "note": "Davidson et al. (2002) — gene regulatory network for sea urchin endomesoderm specification; Science"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-cs/b-gene-regulatory-network-x-boolean-circuit.yaml"
    },
    {
      "id": "b-immune-memory-x-long-term-potentiation",
      "title": "Immune Memory x Long-Term Potentiation — B-cell affinity maturation as memory consolidation\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "B-cell affinity maturation in germinal centers (iterative mutation → selection → clonal expansion) and hippocampal long-term potentiation (synaptic strengthening by repeated activation) both implement associative memory through Hebbian-like reinforcement; both require threshold activation, exhibit w",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Immunologists and neuroscientists developed affinity maturation and LTP as completely separate fields; Jerne's network theory (1974) hinted at immunological memory analogies to neural networks but was not formalized; the Hopfield network (1982) bridged neural and associative memory mathematics without connecting back to immunology.\n",
      "translation_table": [
        {
          "field_a_term": "B-cell somatic hypermutation (SHM)",
          "field_b_term": "Synaptic weight perturbation in memory consolidation",
          "note": "SHM introduces random mutations in antibody variable regions at ~10⁶× background rate; selection retains high-affinity variants analogous to Hebbian weight strengthening for frequently activated synapses.\n"
        },
        {
          "field_a_term": "Germinal center selection (affinity threshold)",
          "field_b_term": "LTP induction threshold (NMDA receptor coincidence detection)",
          "note": "B cells with affinity above a threshold receive T cell help and survive; NMDA receptors require both pre- and post-synaptic depolarization for LTP induction; both are coincidence detectors with sharp activation thresholds.\n"
        },
        {
          "field_a_term": "Clonal expansion of high-affinity B cells",
          "field_b_term": "Winner-take-all synaptic competition",
          "note": "High-affinity B cell clones outcompete low-affinity clones for T cell help (winner-take-all); in neural circuits, strong synapses grow at expense of weak ones (synaptic competition) through similar competitive dynamics.\n"
        },
        {
          "field_a_term": "Memory B cells (long-lived plasma cells)",
          "field_b_term": "Long-term potentiated synapses (structural LTP)",
          "note": "Both store memory in the strength of connections (antibody affinity / synaptic weight) that persist for years; both require protein synthesis for stable long-term storage (memory consolidation).\n"
        }
      ],
      "references": [
        {
          "doi": "10.1016/j.immuni.2015.01.006",
          "note": "Victora & Mesin (2014) — Germinal center dynamics revealed by multiphoton microscopy; Immunity 41:522"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-cs/b-immune-memory-x-long-term-potentiation.yaml"
    },
    {
      "id": "b-immune-system-x-anomaly-detection",
      "title": "Immune system x Anomaly detection - negative selection as one-class classification\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The adaptive immune system's negative selection process (deleting T-cells that recognize self-antigens in the thymus) is computationally equivalent to one-class classification and anomaly detection; the remaining T-cell repertoire is trained on self to recognize non-self, implementing the artificial",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Immunologists studying thymic selection and computer scientists developing anomaly detection algorithms worked in isolation; Forrest et al. (1994) explicitly formalized the connection via artificial immune systems, but mainstream machine learning (one-class SVM, autoencoders for anomaly detection) rarely cites immunological negative selection as the biological precursor — missing potential insights from somatic hypermutation as a model for continual learning.\n",
      "translation_table": [
        {
          "field_a_term": "thymic negative selection deleting self-reactive T-cells (immunology)",
          "field_b_term": "one-class classification trained on normal data, rejecting outliers (machine learning)",
          "note": "Negative selection is exactly one-class SVM / isolation forest trained on self-antigens; T-cells that fire on self are eliminated"
        },
        {
          "field_a_term": "MHC-peptide complex (self-antigen presentation) (immunology)",
          "field_b_term": "training data point in feature space (machine learning)",
          "note": "MHC-peptides are the feature vectors that define the self distribution; the T-cell receptor is the classifier"
        },
        {
          "field_a_term": "T-cell receptor hypervariable complementarity-determining regions (immunology)",
          "field_b_term": "detector strings / random feature vectors in Forrest et al. AIS (computer science)",
          "note": "The V(D)J recombination generates random detectors analogous to random projections for anomaly detection"
        },
        {
          "field_a_term": "clonal selection and expansion upon antigen recognition (immunology)",
          "field_b_term": "online learning / adaptive anomaly detection updating the model (machine learning)",
          "note": "Clonal expansion is reinforcement of detectors that found anomalies — equivalent to online updating of the anomaly detector"
        }
      ],
      "references": [
        {
          "doi": "10.1162/neco.1994.6.6.1064",
          "note": "Forrest et al. (1994) - Self-nonself discrimination in a computer; IEEE Symp Security and Privacy — AIS paper"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-cs/b-immune-system-x-anomaly-detection.yaml"
    },
    {
      "id": "b-information-theory-x-evolutionary-biology",
      "title": "Information Theory x Evolutionary Biology — natural selection as Bayesian inference\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Natural selection updates the population's genetic prior toward higher fitness using the same mathematical operation as Bayesian belief updating; Fisher's fundamental theorem of natural selection is the biological analogue of the data processing inequality in information theory.\n",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Biologists focus on mechanism and contingency; information theorists focus on optimality and compression. The mathematical isomorphism was noted by Frank (2009) but has not widely penetrated either field's textbooks.\n",
      "translation_table": [
        {
          "field_a_term": "Allele frequency distribution (prior)",
          "field_b_term": "Prior probability distribution",
          "note": "The population's initial allele distribution is the prior; selection is the likelihood function.\n"
        },
        {
          "field_a_term": "Fitness landscape W(g)",
          "field_b_term": "Likelihood function P(data|hypothesis)",
          "note": "Fitness evaluates how well a genotype explains the environment — mathematically identical to a likelihood function.\n"
        },
        {
          "field_a_term": "Fisher's fundamental theorem (dW/dt = Var(W))",
          "field_b_term": "Rate of KL divergence reduction",
          "note": "The rate of fitness increase equals the genetic variance, which is also the rate of KL divergence reduction between current and optimal allele distribution.\n"
        },
        {
          "field_a_term": "Genetic drift",
          "field_b_term": "Sampling noise in Bayesian updating",
          "note": "Finite population size introduces stochastic sampling error in the selection update — exactly analogous to Monte Carlo noise in Bayesian MCMC.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1111/j.1469-185X.2009.00089.x",
          "note": "Frank (2009) — natural selection maximizes Fisher information — foundational connection"
        },
        {
          "doi": "10.1098/rspb.2013.2372",
          "note": "Krakauer et al. (2014) — information theory of individuality and evolution"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-cs/b-information-theory-x-evolutionary-biology.yaml"
    },
    {
      "id": "b-neural-plasticity-x-hebbian-learning",
      "title": "Neural Plasticity x Hebbian Learning — spike-timing dependent plasticity as correlation detector\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Spike-timing dependent plasticity (STDP) implements a temporal Hebbian learning rule: synapses strengthen when pre-synaptic spikes precede post-synaptic spikes (causal), and weaken for reverse order; STDP is mathematically equivalent to online gradient descent on a temporal prediction error, linking",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Computational neuroscientists derived STDP learning rules from biology in the 1990s; machine learning developed backpropagation independently; the connection (STDP ≈ online gradient descent) was made by Xie & Seung (2000) but remains underutilized in deep learning architectures.\n",
      "translation_table": [
        {
          "field_a_term": "STDP learning window Δw(Δt)",
          "field_b_term": "Temporal credit assignment kernel",
          "note": "The asymmetric STDP window (potentiation for Δt>0, depression for Δt<0) implements a causal filter that detects predictive pre→post spike ordering, equivalent to the eligibility trace in reinforcement learning.\n"
        },
        {
          "field_a_term": "Long-term potentiation (LTP)",
          "field_b_term": "Weight increase by gradient descent",
          "note": "NMDA receptor-mediated LTP requires coincident pre- and post-synaptic activity (Hebb's rule); mathematically this is the positive term in the gradient of a prediction error loss function.\n"
        },
        {
          "field_a_term": "Long-term depression (LTD)",
          "field_b_term": "Weight decrease (anti-Hebbian term)",
          "note": "LTD for reversed spike order (post before pre) provides the anti-Hebbian term needed for stable learning; without it, Hebbian learning diverges (runaway potentiation).\n"
        },
        {
          "field_a_term": "Homeostatic synaptic scaling",
          "field_b_term": "Weight normalization / L2 regularization",
          "note": "Synaptic scaling (global up/down regulation of all synapses to maintain target firing rate) is equivalent to L2 regularization in neural networks, preventing saturation of synaptic weights.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1523/JNEUROSCI.18-24-10464.1998",
          "note": "Bi & Poo (1998) — Synaptic modifications in cultured hippocampal neurons; J Neurosci 18:10464"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-cs/b-neural-plasticity-x-hebbian-learning.yaml"
    },
    {
      "id": "b-neural-spike-coding-x-information-compression",
      "title": "Neural spike coding x Information compression — retinal ganglion cells as efficient encoders\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Retinal ganglion cell spike trains are efficient codes in the information-theoretic sense; center-surround receptive fields implement a whitening filter that removes spatial redundancy in natural images, maximizing mutual information per spike — a biological implementation of principal component ana",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Neurophysiologists studying retinal circuits and information theorists studying source coding developed parallel frameworks; Barlow's efficient coding hypothesis (1961) bridged them conceptually but quantitative testing required natural image statistics measurements that became practical only in the 1990s.\n",
      "translation_table": [
        {
          "field_a_term": "center-surround receptive field (retinal neuroscience)",
          "field_b_term": "whitening / decorrelation filter (signal processing)",
          "note": "The difference-of-Gaussians RF profile is the optimal linear filter for removing second-order spatial correlations in natural images"
        },
        {
          "field_a_term": "retinal ganglion cell spike rate (neuroscience)",
          "field_b_term": "efficient code / compressed representation (information theory)",
          "note": "Ganglion cells allocate more spikes to unexpected (high-information) features; sparse coding maximizes mutual information per action potential"
        },
        {
          "field_a_term": "lateral inhibition in retina (neuroscience)",
          "field_b_term": "redundancy reduction (information theory)",
          "note": "Lateral inhibition suppresses responses to spatially correlated (redundant) inputs, implementing Barlow's efficient coding hypothesis"
        }
      ],
      "references": [
        {
          "doi": "10.1038/381520a0",
          "note": "Olshausen & Field (1996) - Emergence of simple-cell receptive field properties by learning a sparse code for natural images; Nature 381:607"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-cs/b-neural-spike-coding-x-information-compression.yaml"
    },
    {
      "id": "b-swarm-intelligence-x-distributed-computing",
      "title": "Swarm intelligence x Distributed computing - ant colony as consensus algorithm\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Ant colony optimization (ACO) and honeybee swarm decision-making implement distributed consensus algorithms without central coordination; pheromone reinforcement in ACO is distributed gradient ascent on solution quality where each ant is a gradient estimator, and quorum sensing in honeybee swarming ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Entomologists studying social insect collective behavior (Wilson 1971, Seeley 1995) and computer scientists developing distributed algorithms (Lamport 1982, Castro & Liskov 1999) built parallel theories of decentralized decision-making; Dorigo and Gambardella (1997) formalized ACO, but the connection between honeybee quorum sensing and Byzantine fault-tolerant consensus (Seeley & Visscher 2004) was only recently appreciated, offering biological precedents for robust decentralized computing.\n",
      "translation_table": [
        {
          "field_a_term": "pheromone trail concentration tau_ij on edge (i,j) (ant colony)",
          "field_b_term": "gradient estimate for path quality / belief in distributed optimization (computer science)",
          "note": "Pheromone concentration is a distributed stochastic gradient; stronger pheromone = higher belief in solution quality"
        },
        {
          "field_a_term": "evaporation rate rho in pheromone update (ant colony optimization)",
          "field_b_term": "forgetting factor / momentum decay in distributed gradient descent (optimization)",
          "note": "Pheromone evaporation prevents premature convergence to local optima, equivalent to momentum decay in adaptive optimizers"
        },
        {
          "field_a_term": "quorum threshold Q for honeybee nest site selection (biology)",
          "field_b_term": "Byzantine quorum / 2/3 majority threshold in distributed consensus (computer science)",
          "note": "Bees commit to a nest only after >Q scouts visit it, preventing convergence to poorly-explored sites analogous to BFT quorum systems"
        },
        {
          "field_a_term": "waggle dance duration proportional to site quality (biology)",
          "field_b_term": "weighted vote in distributed voting protocol (distributed systems)",
          "note": "Dance duration encodes site quality; bees stop dancing when they visit competing sites — implementing preference aggregation"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF00992699",
          "note": "Dorigo & Gambardella (1997) - Ant colony system: A cooperative learning approach to TSP; IEEE Trans Evol Comput 1:53"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-cs/b-swarm-intelligence-x-distributed-computing.yaml"
    },
    {
      "id": "b-microbiome-diversity-stability",
      "title": "Gut microbiome species diversity predicts community resilience to antibiotic perturbation and pathogen invasion, following May's theoretical diversity- stability relationship: higher phylogenetic diversity increases functional redundancy and reduces the probability that a single perturbation collapses the entire community.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "May (1972) showed that in random ecological communities, stability (return to equilibrium after perturbation) decreases with diversity and interaction strength: σ²SC < 1 (May's criterion), where σ² is interaction variance, S is species richness, and C is connectance. However, structured communities ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-microbiome-functional-redundancy-antibiotic-resilience"
      ],
      "communication_gap": "May's random-matrix stability results (published in Nature 1972) are central to theoretical ecology but rarely cited in microbiome medicine literature. Clinical microbiologists measure diversity indices empirically without formal dynamical stability theory; ecologists rarely engage with clinical data.\n",
      "translation_table": [
        {
          "field_a_term": "May's community matrix eigenvalue maximum (stability criterion)",
          "field_b_term": "Microbiome recovery rate (inverse return time after antibiotic)",
          "note": "Negative maximum eigenvalue → stable community; scales with functional redundancy"
        },
        {
          "field_a_term": "Ecological functional redundancy",
          "field_b_term": "Multiple gut taxa performing same fermentation/butyrate pathway",
          "note": "Redundant taxa buffer against stochastic extinction of individual species"
        },
        {
          "field_a_term": "Connectance C (fraction of realised interactions)",
          "field_b_term": "Microbiome co-occurrence network density",
          "note": "Higher connectance increases cross-feeding resilience but can destabilise under May's criterion"
        },
        {
          "field_a_term": "Diversity index (Shannon entropy H)",
          "field_b_term": "Gut microbiome α-diversity (16S rRNA amplicon sequencing)",
          "note": "H predicts recovery speed from Clostridioides difficile infection post-antibiotics"
        }
      ],
      "references": [
        {
          "doi": "10.1038/238413a0",
          "note": "May (1972) Nature – will a large complex system be stable? random-matrix stability criterion"
        },
        {
          "doi": "10.1038/nature05414",
          "note": "Turnbaugh et al. (2006) Nature – the human microbiome; diversity and metabolic function"
        },
        {
          "doi": "10.1126/science.aad9359",
          "note": "Sonnenburg & Sonnenburg (2019) Science – diet-induced microbiome diversity and resilience"
        },
        {
          "doi": "10.1038/s41586-019-1406-7",
          "note": "Martiny et al. – functional redundancy in microbiomes provides stability under perturbation"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-ecology/b-microbiome-diversity-stability.yaml"
    },
    {
      "id": "b-biofilm-self-assembly",
      "title": "Bacterial biofilm formation via quorum sensing is a chemical-order-parameter phase transition governed by the same self-assembly mathematics as colloidal and block-copolymer nanostructure assembly",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Bacterial biofilm formation is a phase transition from planktonic (disordered) to biofilm (structured) states triggered when autoinducer concentration (N-acyl homoserine lactones) crosses a critical threshold — an order parameter analogous to those governing colloidal particle self-assembly and bloc",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-biofilm-threshold-colloidal-phase-transition-analogy"
      ],
      "communication_gap": "Microbiology and materials science operate in separate journals and conferences. The language of free-energy landscapes and phase diagrams is native to materials science but rarely applied formally to bacterial ecology. Funding structures rarely bridge NIH (biology) and NSF-DMR (materials).\n",
      "translation_table": [
        {
          "field_a_term": "autoinducer concentration",
          "field_b_term": "order parameter (volume fraction, chi·N)",
          "note": "Critical threshold triggers phase transition in both biological and synthetic systems"
        },
        {
          "field_a_term": "quorum sensing threshold",
          "field_b_term": "spinodal / binodal decomposition point",
          "note": "Both define the boundary between stable disordered and structured phases"
        },
        {
          "field_a_term": "biofilm matrix (EPS)",
          "field_b_term": "self-assembled nanostructure (micelle, lamella)",
          "note": "Emergent ordered structures that appear above threshold"
        },
        {
          "field_a_term": "gene expression switch",
          "field_b_term": "nucleation event",
          "note": "Stochastic first passage triggering ordered-phase growth"
        }
      ],
      "references": [
        {
          "note": "Fuqua et al. (1994) — quorum sensing discovery",
          "doi": "10.1128/jb.176.2.269-275.1994"
        },
        {
          "note": "Costerton et al. (1999) — biofilm science review",
          "doi": "10.1126/science.284.5418.1318"
        },
        {
          "note": "Hall-Stoodley et al. (2004) — biofilm development review",
          "doi": "10.1038/nrmicro821"
        },
        {
          "note": "Ng & Bassler (2009) — quorum sensing mechanisms",
          "doi": "10.1146/annurev.genet.43.110708.163243"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-engineering/b-biofilm-self-assembly.yaml"
    },
    {
      "id": "b-crispr-cas9-gene-editing",
      "title": "CRISPR-Cas9 programmable endonuclease — guided by 20-nt sgRNA to a PAM-adjacent target — creates precise double-strand breaks repaired by NHEJ or HDR, enabling base editors (A→G without DSB) and prime editors (any 12-nt change via reverse transcriptase) now entering clinical use for sickle cell disease (FDA 2023).\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The CRISPR-Cas9 system (Doudna-Charpentier Nobel 2020) repurposes a prokaryotic adaptive immune mechanism as a precision genome-engineering tool. The single-guide RNA (sgRNA) — a fusion of CRISPR RNA (crRNA) and trans-activating crRNA (tracrRNA) — directs the Cas9 endonuclease to a 20-nucleotide DNA",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-prime-editing-hdr-bypass-therapeutic-window"
      ],
      "communication_gap": "CRISPR research spans microbiology (discovery), structural biology (mechanism), protein engineering (optimization), clinical medicine (translation), and bioethics (governance) — communities with largely separate publication venues (Nature, Science, Cell, NEJM, Nature Biotech) and conferences. The 2018 He Jiankui affair (germline editing) has further siloed basic researchers from clinical translation due to regulatory uncertainty. Engineering principles (off-target scoring, delivery vehicle design) are developed by biotech companies and often not published in peer-reviewed literature, creating a science-industry knowledge gap.\n",
      "translation_table": [
        {
          "field_a_term": "PAM-adjacent 20-nt target sequence",
          "field_b_term": "address register in a programmable processor",
          "note": "changing the sgRNA sequence reprograms the cut site — modular design principle"
        },
        {
          "field_a_term": "Cas9 RuvC + HNH nuclease domains",
          "field_b_term": "dual cutting-head in a molecular scissors machine"
        },
        {
          "field_a_term": "NHEJ repair (error-prone, fast)",
          "field_b_term": "destructive write (delete/disrupt) operation"
        },
        {
          "field_a_term": "HDR repair with template (precise, slow)",
          "field_b_term": "exact overwrite (replace) operation with specified payload"
        },
        {
          "field_a_term": "adenine base editor (ABE) — deaminase fused to nCas9",
          "field_b_term": "single-base chemical converter — targeted point mutation repair"
        },
        {
          "field_a_term": "prime editor pegRNA + nCas9-RT",
          "field_b_term": "search-and-replace molecular text editor"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1225829",
          "note": "Jinek et al. (2012) Science 337:816 — Cas9 programmable dual-RNA guided DNA endonuclease"
        },
        {
          "doi": "10.1126/science.1231143",
          "note": "Cong et al. (2013) Science 339:819 — multiplex genome engineering using CRISPR/Cas systems"
        },
        {
          "doi": "10.1038/nature17946",
          "note": "Komor et al. (2016) Nature 533:420 — programmable editing of a target base in genomic DNA without DSB"
        },
        {
          "doi": "10.1038/s41586-019-1711-4",
          "note": "Anzalone et al. (2019) Nature 576:149 — search-and-replace genome editing without DSBs or donor DNA"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-engineering/b-crispr-cas9-gene-editing.yaml"
    },
    {
      "id": "b-crispr-diagnostics-point-of-care",
      "title": "CRISPR Diagnostics and Point-of-Care Testing — SHERLOCK and DETECTR exploit Cas13/Cas12 collateral cleavage for attomolar-sensitivity, paper-based pathogen detection",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Beyond gene editing, CRISPR-associated nucleases are powerful diagnostic biosensors that exploit the same guide-RNA base-pairing specificity used in genome editing but repurposed for target detection. Two platforms dominate. SHERLOCK (Specific High- sensitivity Enzymatic Reporter unLOCKing, Gootenbe",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "The CRISPR diagnostics community sits at the intersection of molecular biology, bioengineering, and global health — communities that rarely design research together. The technology was developed primarily in academic research labs (Doudna/Collins) and required pandemic urgency (COVID-19) to accelerate translation into point-of- care products. Regulatory approval pathways for nucleic acid diagnostics are complex and slow; many CRISPR diagnostic advances remain in research stage while inferior PCR-based tests dominate clinical practice.\n",
      "translation_table": [
        {
          "field_a_term": "Cas13a collateral trans-cleavage (RNase activity)",
          "field_b_term": "signal amplification step in SHERLOCK assay",
          "note": "Each activated Cas13a cleaves ~1000 reporter molecules/min — massive signal gain without PCR cycling"
        },
        {
          "field_a_term": "crRNA programmability (20-nt spacer design)",
          "field_b_term": "sequence-specific pathogen detection with rapid reprogramming",
          "note": "New diagnostic can be designed and synthesised in hours when pathogen genome sequence is known"
        },
        {
          "field_a_term": "lateral flow readout (paper-based)",
          "field_b_term": "instrument-free visual result analogous to pregnancy test",
          "note": "Streptavidin–FAM reporter gives yes/no result without laboratory infrastructure; critical for LMIC deployment"
        },
        {
          "field_a_term": "isothermal pre-amplification (RPA or LAMP)",
          "field_b_term": "nucleic acid amplification without thermocycler",
          "note": "RPA at 37–42°C; LAMP at 65°C — battery-powered heat source sufficient; removes the PCR cold-chain requirement"
        },
        {
          "field_a_term": "CARMEN multiplexing (Cas13 combinatorial array)",
          "field_b_term": "simultaneous screening for 169 viruses from one patient sample",
          "note": "Microfluidic array pairs each sample with each CRISPR probe; enables syndromic respiratory panel in resource-limited settings"
        },
        {
          "field_a_term": "attomolar sensitivity (10⁻¹⁸ M detection limit)",
          "field_b_term": "detection of single-copy pathogen nucleic acid per µL without culture",
          "note": "Orders of magnitude more sensitive than antigen tests; approaches single-molecule detection with amplification"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.aam9321",
          "note": "Gootenberg et al. (2017) Science 356:438 — SHERLOCK Cas13a diagnostic"
        },
        {
          "doi": "10.1126/science.aar2731",
          "note": "Chen et al. (2018) Science 360:436 — DETECTR Cas12a diagnostic"
        },
        {
          "doi": "10.1038/s41596-019-0210-2",
          "note": "Kellner et al. (2019) Nat Protoc 14:2986 — SHERLOCK protocol"
        },
        {
          "doi": "10.1126/science.aas9192",
          "note": "Myhrvold et al. (2018) Science 360:444 — CARMEN multiplexed virus detection"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-engineering/b-crispr-diagnostics-point-of-care.yaml"
    },
    {
      "id": "b-neuromuscular-control-biomechanics",
      "title": "Muscle contraction (Huxley sliding filament, Hill force-velocity relation) and the neuromuscular control hierarchy (motor unit size principle, spindle reflex loops) constitute a biological servomechanism that engineering control theory can model as a force-controlled actuator with nested feedback loops and nonlinear plant dynamics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Skeletal muscle is a molecular motor operating via the sliding filament mechanism (Huxley 1957): myosin S1 heads cycle through attachment to actin, a 5 nm power stroke driven by ATP hydrolysis, and detachment — a Lymn-Taylor cross-bridge cycle. The macroscopic force-velocity relation is Hill's equat",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-neuromuscular-size-principle-metabolic-optimality"
      ],
      "communication_gap": "Muscle physiology is taught in biomedical science with emphasis on biochemistry (cross-bridge kinetics, excitation-contraction coupling) and rarely connected to engineering control theory. Robotics engineers who design actuators do not typically study Hill equations or Henneman's size principle. Rehabilitation engineers who build exoskeletons and prosthetics often use simplified muscle models without connecting to the full neuromuscular control hierarchy.\n",
      "translation_table": [
        {
          "field_a_term": "actuator with nonlinear force-velocity characteristic",
          "field_b_term": "skeletal muscle with Hill equation F(v)",
          "note": "Hill equation is a hyperbola; engineering actuators are usually linear or modelled as such"
        },
        {
          "field_a_term": "proportional feedback controller (spinal reflex)",
          "field_b_term": "Ia spindle afferent stretch reflex (monosynaptic)",
          "note": "The stretch reflex is a velocity and length feedback loop with approximately proportional-derivative gain"
        },
        {
          "field_a_term": "force limiting / anti-windup",
          "field_b_term": "Golgi tendon organ Ib inhibitory reflex",
          "note": "GTOs protect tendons from excessive force by inhibiting α-motor neurons"
        },
        {
          "field_a_term": "recruitment ordering (size principle)",
          "field_b_term": "Henneman's principle — slow-twitch S → fast-twitch FR → FF recruitment",
          "note": "Optimises fatigue resistance and efficiency by recruiting the most efficient (slow, oxidative) units first"
        },
        {
          "field_a_term": "EMG signal as motor command estimate",
          "field_b_term": "electromyography decodes motor unit action potentials for force estimation",
          "note": "High-density surface EMG can decompose individual motor unit firing patterns for BCI and rehabilitation"
        }
      ],
      "references": [
        {
          "doi": "10.1016/S0079-6107(57)90048-6",
          "note": "Huxley, A.F. (1957) Prog Biophys Biophys Chem 7:255 — muscle contraction by sliding filaments; cross-bridge model"
        },
        {
          "doi": "10.1098/rspb.1938.0050",
          "note": "Hill, A.V. (1938) Proc R Soc B 126:136 — heat of shortening and dynamic constants of muscle; Hill equation"
        },
        {
          "note": "Enoka, R.M. (2015) Neuromechanics of Human Movement, 5th ed. Human Kinetics."
        },
        {
          "doi": "10.1152/jn.1965.28.3.560",
          "note": "Henneman et al. (1965) J Neurophysiol 28:560 — functional significance of motor unit size; size principle"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-engineering/b-neuromuscular-control-biomechanics.yaml"
    },
    {
      "id": "b-optogenetics-neural-circuit-control",
      "title": "Optogenetics bridges biology and engineering: viral delivery of algal channelrhodopsin-2 and archaeal halorhodopsin to specific neuron types enables millisecond-precision optical control of neural circuits, culminating in the first human vision restoration trial in 2021.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Optogenetics (Boyden & Deisseroth 2005) uses light-gated ion channels from microorganisms to control neural activity with millisecond precision. Engineering components: (1) Actuators: channelrhodopsin-2 (ChR2, from Chlamydomonas reinhardtii) ΓÇö blue light (470 nm) opens non-selective cation channel",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-optogenetic-restoration-vision-scales-to-complex-percepts"
      ],
      "communication_gap": "Neuroscientists who use optogenetics experimentally focus on circuit-level questions (which neurons drive behavior) without engaging with the engineering challenges (light scattering in tissue, heat deposition, immune responses to viral vectors, fiber implant trauma). Biomedical engineers developing light delivery systems rarely engage with the neuroscience circuit questions that motivate the tool. The human therapy translation gap — from mouse circuit experiments to human gene therapy — requires regulatory, ethical, and manufacturing expertise beyond either field.\n",
      "translation_table": [
        {
          "field_a_term": "ChR2 (channelrhodopsin-2)",
          "field_b_term": "genetically encoded optical neural activator; light-gated Na⁺/K⁺ channel",
          "note": "retinal cofactor (all-trans retinal, endogenous in brain) isomerizes on photon absorption → channel opens"
        },
        {
          "field_a_term": "AAV serotype specificity (AAV9, PHP.eB)",
          "field_b_term": "viral vector engineering for targeted gene delivery",
          "note": "capsid protein variants determine tropism; PHP.eB was developed by directed evolution"
        },
        {
          "field_a_term": "cell-type-specific promoter (CaMKII, GAD, Th)",
          "field_b_term": "genetic targeting — opsin expressed only in desired cell type",
          "note": "specificity achieved by promoter + AAV serotype combination; can be further restricted by Cre recombinase"
        },
        {
          "field_a_term": "fiber optic implant (light delivery)",
          "field_b_term": "photonic engineering — coupling coherent light to brain tissue",
          "note": "scattering and absorption limit depth (< 1 mm from fiber tip); gradient-index lens for multiphoton"
        },
        {
          "field_a_term": "DREADD (hM3Dq, hM4Di)",
          "field_b_term": "pharmacological neural control; ligand-gated synthetic receptor",
          "note": "CNO → active CNO metabolite clozapine (Gomez 2017) → crosses BBB → activates DREADD; slow but non-invasive"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nn1525",
          "note": "Boyden et al. (2005) Millisecond-timescale, genetically targeted optical control of neural activity; Nat Neurosci 8:1263"
        },
        {
          "note": "Deisseroth (2011) Optogenetics; Sci Am 303:48"
        },
        {
          "doi": "10.1073/pnas.0700293104",
          "note": "Armbruster et al. (2007) Evolving the lock to fit the key to create a family of G protein-coupled receptors potently activated by an inert ligand; PNAS 104:5163"
        },
        {
          "doi": "10.1038/s41591-021-01351-4",
          "note": "Sahel et al. (2021) Partial recovery of visual function in a blind patient after optogenetic therapy; Nat Med 27:1223"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-engineering/b-optogenetics-neural-circuit-control.yaml"
    },
    {
      "id": "b-synthetic-biology-circuit-design",
      "title": "Synthetic biology applies electrical engineering design principles to genetic circuits: Gardner's toggle switch (2000) implements bistable flip-flop logic, Elowitz's repressilator (2000) implements a ring oscillator, and retroactivity from circuit loading — analogous to impedance mismatch — requires biological insulator modules to compose circuits without unintended cross-coupling.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Synthetic biology (Endy 2005) applies electrical engineering abstraction principles — modularity, standardization, composability — to genetic parts. The toggle switch (Gardner et al. 2000): two mutually repressing genes — dX/dt = α₁/(1+Y^β) - X; dY/dt = α₂/(1+X^γ) - Y — is bistable when β,γ > 1 (coo",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-synthetic-insulator-retroactivity-control"
      ],
      "communication_gap": "Electrical engineers and molecular biologists study in separate departments with different mathematical training — engineers use Laplace transforms and Bode plots while biologists use differential equations and gene expression assays. Synthetic biology bridged this gap at the level of design metaphors (parts, devices, systems) but the quantitative transfer of control theory tools (e.g., Bode analysis of genetic circuits) remains limited. Retroactivity is known in the field but rarely quantified in published synthetic biology papers, limiting composability of published devices.\n",
      "translation_table": [
        {
          "field_a_term": "promoter + ribosome binding site (RBS) + gene + terminator",
          "field_b_term": "electronic component with defined input/output transfer function",
          "note": "promoter strength = gain; RBS efficiency = current amplification factor"
        },
        {
          "field_a_term": "toggle switch bistability (Hill coefficient β,γ > 1)",
          "field_b_term": "SR latch (set-reset flip-flop) in digital electronics",
          "note": "bistability requires sufficient cooperative repression; monotone repressors cannot toggle"
        },
        {
          "field_a_term": "repressilator ring oscillator period T",
          "field_b_term": "ring oscillator period in CMOS (3-inverter ring) T = 6τ_pd",
          "note": "both depend on the delay per stage; degradation time ~ propagation delay"
        },
        {
          "field_a_term": "retroactivity to the input of a module",
          "field_b_term": "input impedance loading in electronic circuits",
          "note": "low input impedance (high retroactivity) perturbs the upstream module's dynamics"
        },
        {
          "field_a_term": "biological insulator module (phosphorylation cascade)",
          "field_b_term": "electronic buffer amplifier (high input impedance, low output impedance)",
          "note": "insulator absorbs retroactivity while faithfully transmitting the signal"
        }
      ],
      "references": [
        {
          "doi": "10.1038/35002131",
          "note": "Gardner et al. (2000) Construction of a genetic toggle switch in Escherichia coli. Nature 403:339–342"
        },
        {
          "doi": "10.1038/35002125",
          "note": "Elowitz & Leibler (2000) A synthetic oscillatory network of transcriptional regulators. Nature 403:335–338"
        },
        {
          "doi": "10.1038/msb.2008.19",
          "note": "Del Vecchio et al. (2008) Modular cell biology: retroactivity and insulation. Mol Syst Biol 4:161"
        },
        {
          "doi": "10.1038/nmeth.2926",
          "note": "Brophy & Voigt (2014) Principles of genetic circuit design. Nat Methods 11:508–520"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-engineering/b-synthetic-biology-circuit-design.yaml"
    },
    {
      "id": "b-tensegrity-cytoskeleton-mechanics",
      "title": "The cellular cytoskeleton implements biological tensegrity — a structural engineering principle where continuous tension (actin filaments, intermediate filaments) and discontinuous compression (microtubules) create mechanically stable structures whose stiffness scales with prestress — explaining how cells maintain shape, sense substrate stiffness, and transmit mechanical signals to the nucleus.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Buckminster Fuller's tensegrity structures distribute mechanical loads through pre-stressed tension networks rather than rigid frames, giving them high stiffness-to-weight ratios and predictable non-linear mechanical responses. Donald Ingber recognised that cells exhibit identical mechanical behavi",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "",
      "translation_table": [
        {
          "field_a_term": "prestress (internal tension in tensegrity strut network)",
          "field_b_term": "cytoskeletal tension from actomyosin contractility",
          "note": "The pre-existing tension that stiffens the structure before external load"
        },
        {
          "field_a_term": "compression member (strut)",
          "field_b_term": "microtubule (bears compression in the cytoskeletal tensegrity)",
          "note": "Microtubules buckle at physiological compressive loads — as tensegrity predicts"
        },
        {
          "field_a_term": "stiffness G ~ prestress sigma_0",
          "field_b_term": "cell stiffness scales linearly with cytoskeletal tension",
          "note": "The linear scaling between prestress and stiffness is the key tensegrity prediction"
        },
        {
          "field_a_term": "force transmission through the network",
          "field_b_term": "mechanotransduction (nucleus responds to substrate stiffness)",
          "note": "Tensegrity predicts force is transmitted without attenuation to the nucleus"
        }
      ],
      "references": [
        {
          "doi": "10.1146/annurev.ph.59.030197.000425",
          "note": "Ingber (1997) - tensegrity: the architectural basis of cellular mechanotransduction"
        },
        {
          "doi": "10.1073/pnas.082095599",
          "note": "Wang et al. (2001) - mechanical behavior in living cells consistent with the tensegrity model"
        },
        {
          "doi": "10.1038/35071656",
          "note": "Ingber (2003) - tensegrity I: cell structure and hierarchical systems biology"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-engineering/b-tensegrity-cytoskeleton-mechanics.yaml"
    },
    {
      "id": "b-tissue-engineering-regenerative-medicine",
      "title": "Tissue engineering bridges biology and engineering: scaffolds, cells, and bioreactors combine to produce functional tissue replacements, with the vascularization bottleneck (diffusion limit of O₂ at ~200 μm) as the central engineering constraint, and organoids as the biological self-organization model that partially bypasses scaffold requirements.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Tissue engineering (Langer & Vacanti 1993) combines principles from engineering and biology: a scaffold (structural support, matching mechanical properties of target tissue), seeded with cells (patient-derived or stem cells), cultured in a bioreactor (mechanical conditioning, nutrient delivery), pro",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-sacrificial-templating-vascular-network-bioprinting"
      ],
      "communication_gap": "Tissue engineering requires materials science (scaffold chemistry), cell biology (stem cells, differentiation), biomedical engineering (bioreactor design, fluid mechanics), and clinical medicine (transplantation, regulatory approval) — disciplines with separate journals, funding agencies, and institutional departments. The vascularization problem requires fluid dynamics (Navier-Stokes in porous media), oxygen transport (reaction-diffusion equations), angiogenesis biology, and surgical anastomosis — a combination that no single research group typically commands. Regulatory frameworks (FDA 510(k), PMA, HCT/P classification) create additional barriers that slow translation from engineering laboratory to clinical application.\n",
      "translation_table": [
        {
          "field_a_term": "scaffold porosity and pore interconnectivity (engineering parameter)",
          "field_b_term": "extracellular matrix (ECM) architecture (biological equivalent)",
          "note": "scaffold pores serve the same function as ECM pores — cell adhesion, nutrient diffusion, waste removal"
        },
        {
          "field_a_term": "bioreactor (controlled perfusion, mechanical loading, O₂ delivery)",
          "field_b_term": "in vivo physiological environment (blood flow, mechanical stress, hormonal signals)",
          "note": "bioreactor mimics in vivo conditions; mechanical conditioning (pulsatile flow) is essential for cardiovascular TE"
        },
        {
          "field_a_term": "vascularization bottleneck (O₂ diffusion limit ~200 μm)",
          "field_b_term": "Krogh cylinder model (classical physiology capillary geometry model)",
          "note": "Krogh (1919) derived the 200 μm capillary spacing from O₂ diffusion-reaction equations"
        },
        {
          "field_a_term": "scaffold stiffness matching (Young's modulus E, Pa)",
          "field_b_term": "mechanotransduction / matrix stiffness sensing (YAP/TAZ pathway)",
          "note": "cells sense scaffold stiffness and differentiate accordingly — mechanical cue = biochemical signal"
        },
        {
          "field_a_term": "bioprinting resolution (μm, determined by nozzle diameter / laser spot)",
          "field_b_term": "capillary diameter (~7 μm erythrocyte, ~10 μm capillary lumen)",
          "note": "printing resolution must match or exceed capillary diameter to print functional vasculature"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.8493529",
          "note": "Langer & Vacanti (1993) Tissue engineering; Science 260:920"
        },
        {
          "doi": "10.1016/j.cell.2016.03.023",
          "note": "Clevers (2016) Modeling development and disease with organoids; Cell 165:1586"
        },
        {
          "doi": "10.1038/nbt.2958",
          "note": "Murphy & Atala (2014) 3D bioprinting of tissues and organs; Nat Biotechnol 32:773"
        },
        {
          "doi": "10.1126/science.215.4529.174",
          "note": "Yannas et al. (1982) Wound tissue can utilize a polymeric template to synthesize a functional extension of skin; Science 215:174"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-engineering/b-tissue-engineering-regenerative-medicine.yaml"
    },
    {
      "id": "b-dna-digital-error-correcting-code",
      "title": "The genetic code is a near-optimal digital error-correcting code: codon degeneracy implements a natural parity-check scheme that minimises the chemical impact of single-base mutations, and the 64-codon/20-amino-acid mapping operates near the Shannon capacity of the DNA replication channel.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Shannon's channel coding theorem (1948) establishes that for any noisy channel with capacity C = B log₂(1 + SNR), there exist codes that transmit information with arbitrarily small error probability at rates below C. The genetic code — the mapping from 64 codons (triplets of the four DNA bases A, T,",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-genetic-code-near-optimal-error-correction"
      ],
      "communication_gap": "Molecular biologists who study the genetic code are generally not trained in Shannon information theory or algebraic coding theory. Coding theorists are unaware that the genetic code is a solved real-world instance of their design problem, optimised over billions of years. Freeland & Hurst (1998) made the connection accessible, but the result is treated as a curiosity rather than an entry point for rigorous coding-theoretic analysis in the biology literature. The quantitative channel-capacity framing (Itzkovitz & Alon 2007) remains almost entirely within the bioinformatics community.\n",
      "translation_table": [
        {
          "field_a_term": "Channel capacity C = B log₂(1 + SNR)",
          "field_b_term": "Maximum information throughput of the DNA replication machinery",
          "note": "Noise source = polymerase error rate; bandwidth = genome size × replication rate"
        },
        {
          "field_a_term": "Codeword (binary string of length n)",
          "field_b_term": "Codon (nucleotide triplet from {A, U, G, C}^3)",
          "note": "64 = 4^3 possible codewords; quaternary rather than binary alphabet"
        },
        {
          "field_a_term": "Minimum Hamming distance d_min of a code",
          "field_b_term": "Minimum number of base changes needed to convert one amino acid codon to another",
          "note": "d_min ≥ 2 at the amino-acid level for the natural code under single-base substitution"
        },
        {
          "field_a_term": "Parity-check matrix H",
          "field_b_term": "Synonymous codon degeneracy pattern (the wobble rules)",
          "note": "Wobble position (third base) encodes redundancy analogous to a parity bit"
        },
        {
          "field_a_term": "Decoding error (bit-flip)",
          "field_b_term": "Non-synonymous mutation (amino acid change)",
          "note": "Both are the error event the code is designed to minimise"
        },
        {
          "field_a_term": "Frame synchronisation signal",
          "field_b_term": "Stop codons UAA, UAG, UGA",
          "note": "Stop codons truncate misframed reading, analogous to re-synchronising a data stream"
        },
        {
          "field_a_term": "Code rate R = k/n",
          "field_b_term": "Information content per codon = log₂(20)/log₂(64) ≈ 0.70",
          "note": "The genetic code has effective rate 0.70 — well below the quaternary channel capacity, allowing redundancy"
        }
      ],
      "references": [
        {
          "doi": "10.1007/PL00006310",
          "note": "Freeland & Hurst (1998) J Mol Evol 47:238 — the genetic code is one in a million: near-optimal error minimisation"
        },
        {
          "doi": "10.1101/gr.5947507",
          "note": "Itzkovitz & Alon (2007) Genome Res 17:405 — the genetic code is nearly optimal for allowing additional information within protein-coding sequences"
        },
        {
          "note": "Shannon (1948) Bell Syst Tech J 27:379 — A Mathematical Theory of Communication",
          "url": "https://doi.org/10.1002/j.1538-7305.1948.tb01338.x"
        },
        {
          "doi": "10.1093/oxfordjournals.molbev.a025952",
          "note": "Sella & Ardell (2006) — the coevolution of genes and genetic codes; evolutionary optimisation of the code"
        },
        {
          "doi": "10.1093/bioinformatics/btg1019",
          "note": "Gilis et al. (2001) — optimization of the genetic code: the amino acid polar requirement and the origin of life"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/biology-information/b-dna-digital-error-correcting-code.yaml"
    },
    {
      "id": "b-codon-usage-translational-efficiency",
      "title": "Codon usage bias encodes translational kinetics as an information channel: synonymous codons are not equivalent in translation speed, and organisms optimise codon usage to maximise ribosome throughput — a rate-distortion problem where the coding redundancy of the genetic code is exploited to tune the channel capacity of the translation machinery.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The genetic code has 64 codons encoding 20 amino acids plus stop signals, giving ~1.5 bits of coding redundancy per codon. Synonymous codons (different codons for the same amino acid) are used non-uniformly across genomes, with the preferred codons correlating with the abundance of cognate tRNAs. Th",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "",
      "translation_table": [
        {
          "field_a_term": "channel capacity C = max I(X;Y)",
          "field_b_term": "maximum ribosome throughput (proteins per unit time per mRNA)",
          "note": "Set by tRNA pool composition and codon usage; optimised by synonymous codon choice"
        },
        {
          "field_a_term": "code redundancy",
          "field_b_term": "synonymous codon degeneracy (number of codons per amino acid)",
          "note": "The redundancy that allows encoding of translational speed information"
        },
        {
          "field_a_term": "rate-distortion function R(D)",
          "field_b_term": "codon optimisation trade-off: speed vs. folding fidelity",
          "note": "Fast codons may outpace co-translational folding; optimal code balances both"
        },
        {
          "field_a_term": "queuing / traffic jam",
          "field_b_term": "ribosome collision (slow codons cause upstream ribosome pileup)",
          "note": "The 5' ramp of slow codons spaces out ribosomes to prevent collisions"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.0704118104",
          "note": "Tuller et al. (2007) — translation efficiency is determined by both codon bias and the secondary structure of the mRNA"
        },
        {
          "doi": "10.1038/nature06054",
          "note": "Hershberg & Petrov (2008) — selection on codon bias"
        },
        {
          "doi": "10.1016/j.cell.2010.05.001",
          "note": "Ingolia et al. (2009) — genome-wide analysis of translation reveals a ribosomal ramp"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-information-theory/b-codon-usage-translational-efficiency.yaml"
    },
    {
      "id": "b-collective-animal-behavior-information-cascade-quorum-sensing",
      "title": "Collective animal behaviors — fish schooling, bird murmurations, insect swarms — use information cascade and quorum sensing mechanisms that bridge biology and information theory: individuals integrate local signals to make collective decisions whose speed, accuracy, and robustness are governed by the same signal detection and information aggregation principles as engineered sensor networks.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Quorum sensing in bacteria: the threshold concentration S_q where gene expression switches satisfies ∂F/∂S = 0 (Hill function bistability), giving a sharp collective switch at population density N > N_q. For animal groups, the voter model with noise η gives collective decision accuracy ∝ N^(1/2) (wi",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-criticality-hypothesis-animal-groups-optimal-information-transfer"
      ],
      "communication_gap": "Collective behavior biologists studying schooling fish and information theorists studying distributed detection and information cascades share mathematical frameworks (voter models, Bayesian aggregation, criticality), but the explicit channel capacity analysis of animal collectives is underdeveloped; the criticality hypothesis (Bialek et al.) is the most prominent bridge but remains contested in empirical tests.\n",
      "translation_table": [
        {
          "field_a_term": "quorum threshold in bacterial collective behavior (biology)",
          "field_b_term": "decision threshold in Bayesian information aggregation (information theory)",
          "note": "Quorum sensing implements a maximum-likelihood decision rule at population level"
        },
        {
          "field_a_term": "fish school alignment and cohesion (biology)",
          "field_b_term": "synchronization / noise reduction through averaging (information theory)",
          "note": "N-fish alignment reduces orientation noise by √N; mutual information with true predator direction increases"
        },
        {
          "field_a_term": "information cascade / herding (biology)",
          "field_b_term": "incorrect Bayesian updating from correlated private signals (information theory)",
          "note": "Cascade collapse (all follow one) is the biology-information equivalent of rational herding failure"
        },
        {
          "field_a_term": "criticality in animal groups (biology)",
          "field_b_term": "maximum information transmission near phase transition (information theory)",
          "note": "At criticality correlation length → ∞; group transfers information maximally efficiently"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.1118633109",
          "note": "Bialek et al. (2012) - statistical mechanics for natural flocks of birds (criticality)"
        },
        {
          "doi": "10.1126/science.1230020",
          "note": "Rosenthal et al. (2015) - revealing the hidden networks of interaction in mobile animal groups"
        },
        {
          "doi": "10.1006/jtbi.1998.0628",
          "note": "Sumpter (2006) - the principles of collective animal behaviour"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-information-theory/b-collective-animal-behavior-information-cascade-quorum-sensing.yaml"
    },
    {
      "id": "b-crispr-multiplex-pooling-x-barcode-redundancy-intuition",
      "title": "Multiplexed CRISPR perturbation screens pool many distinct guide RNAs or targets into bulk assays and infer genetic effects by decoding barcode identities — abstractly reminiscent of designing redundant identifiers so pooled measurements tolerate dropout or misreads — **not** claiming biological machinery implements Reed–Solomon codes; only an information-design analogy for experimental planning.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "High-throughput pooled CRISPR experiments assign binary-like signatures to perturbations so downstream sequencing demultiplexes signals — coding theory supplies intuition about Hamming distance and redundancy versus noise — practitioners borrow language (“barcodes”, “collision”) compatible with clas",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-barcode-spacing-heuristic-lowers-decoding-error-measured-in-negative-controls"
      ],
      "communication_gap": "Molecular biologists adopt sequencing-heavy pooled screens faster than they absorb formal channel-capacity constraints; coding theorists seldom publish in CRISPR assay journals — metaphor bridges planning conversations without substituting for assay-specific validation.\n",
      "translation_table": [
        {
          "field_a_term": "sgRNA / barcode identifier attached to a perturbation construct",
          "field_b_term": "Codeword symbol in a block code alphabet",
          "note": "Informatics abstraction for pooled decoding responsibilities only."
        },
        {
          "field_a_term": "Sequencing dropout / PCR skew degrading identity calls",
          "field_b_term": "Erasure / asymmetric noise channels in coding theory",
          "note": "Shared worry motivates replicate libraries and diversity spacing."
        },
        {
          "field_a_term": "Multiplex pool complexity versus assay saturation",
          "field_b_term": "Code rate versus minimum distance tradeoffs (informal intuition)",
          "note": "Prevents oversubscribing pools without statistical separation guarantees."
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1247005",
          "note": "Shalem et al. (2014) — genome-scale CRISPR-Cas9 knockout screening in human cells"
        },
        {
          "doi": "10.2307/1907238",
          "note": "Reed & Solomon (1960) — polynomial codes over certain finite fields"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-information-theory/b-crispr-multiplex-pooling-x-barcode-redundancy-intuition.yaml"
    },
    {
      "id": "b-genetic-regulatory-boolean-circuits",
      "title": "Kauffman's NK model maps gene regulatory networks onto Boolean circuits — cell types are attractors and the critical K=2 regime corresponds to edge-of-chaos dynamics",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Kauffman (1969) modeled gene regulatory networks as Boolean networks: N genes each updated by a Boolean function of K randomly chosen inputs. For K < 2, networks freeze in ordered attractors; for K > 2, trajectories are chaotic (exponential sensitivity to initial conditions). At K = 2, networks self",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-kauffman-critical-k2-attractor-cell-types"
      ],
      "communication_gap": "Systems biology and network medicine literatures are aware of Kauffman's NK model but treat it as qualitative metaphor rather than quantitative framework. The Boolean circuit / computational complexity connection (random circuit phase transitions) is not part of standard bioinformatics or systems biology training.\n",
      "translation_table": [
        {
          "field_a_term": "gene (node in Boolean network)",
          "field_b_term": "logic gate in Boolean circuit",
          "note": "Each gene has K inputs and one Boolean function (output = on/off expression)"
        },
        {
          "field_a_term": "K (number of regulatory inputs per gene)",
          "field_b_term": "fan-in of logic gate",
          "note": "K = 2 corresponds to NAND/NOR gate connectivity — sufficient for universality"
        },
        {
          "field_a_term": "attractor of Boolean network dynamics",
          "field_b_term": "stable computational state / register value",
          "note": "Cell types = attractors; ~sqrt(N) attractors in critical K=2 networks"
        },
        {
          "field_a_term": "perturbation spreading (Hamming distance between trajectories)",
          "field_b_term": "sensitivity of circuit output to input changes",
          "note": "Ordered phase: perturbations die out; Chaotic phase: perturbations spread; Critical: marginal"
        }
      ],
      "references": [
        {
          "doi": "10.1016/S0022-5193(69)80016-0",
          "note": "Kauffman, S.A. (1969). Metabolic stability and epigenesis in randomly constructed genetic nets. J Theor Biol 22:437."
        },
        {
          "note": "Kauffman, S.A. (1993). The Origins of Order: Self-Organization and Selection in Evolution. Oxford University Press."
        },
        {
          "doi": "10.1016/S0022-5193(03)00028-6",
          "note": "Aldana et al. (2003). Boolean dynamics of networks with scale-free topology. J Theor Biol 223:433."
        },
        {
          "doi": "10.1209/0295-5075/1/2/001",
          "note": "Derrida & Pomeau (1986). Random networks of automata: a simple annealed approximation. Europhys Lett 1:45."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-information-theory/b-genetic-regulatory-boolean-circuits.yaml"
    },
    {
      "id": "b-protein-dna-binding-information-theoretic-specificity",
      "title": "The sequence specificity of protein-DNA binding is quantified by information theory: the sequence logo information content (bits) equals the reduction in positional entropy, and the total information in a binding site predicts the number of sites in a genome.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Schneider & Stephens (1990) showed that transcription factor binding sites can be quantified as information in bits: the information content Ri = 2 − H(position), where H is Shannon entropy over the four nucleotide frequencies. The total information in a binding site equals log₂(genome_size / number",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-information-content-predicts-binding-site-number"
      ],
      "communication_gap": "Molecular biologists and information theorists interact mainly through bioinformatics; the deeper theoretical connection (genome as communication channel) is well-developed by Schneider but is not standard in molecular biology curricula or textbooks.\n",
      "translation_table": [
        {
          "field_a_term": "transcription factor binding specificity (molecular biology)",
          "field_b_term": "mutual information between protein and DNA sequence (information theory)",
          "note": "High specificity = high mutual information = low-entropy (conserved) positions in the logo"
        },
        {
          "field_a_term": "position weight matrix (molecular biology)",
          "field_b_term": "log-likelihood ratio / information weight (information theory)",
          "note": "PWM entries are log(p_observed/p_background) — pointwise mutual information"
        },
        {
          "field_a_term": "number of TF binding sites in genome (molecular biology)",
          "field_b_term": "channel capacity constraint (information theory)",
          "note": "R_total ≈ log₂(G/n) — the genome acts as a noisy channel locating n sites"
        },
        {
          "field_a_term": "binding affinity / KD (molecular biology)",
          "field_b_term": "log-probability / free energy in information-theoretic channel (information theory)",
          "note": "ΔG = -RT·Ri (binding free energy proportional to information content per site)"
        }
      ],
      "references": [
        {
          "doi": "10.1093/nar/18.20.6097",
          "note": "Schneider & Stephens (1990) — sequence logos and information content of binding sites"
        },
        {
          "doi": "10.1006/jtbi.2000.2174",
          "note": "Schneider (2000) — information content of individual genetic sequences"
        },
        {
          "doi": "10.1093/nar/gky1013",
          "note": "Stormo (2013) — DNA binding sites — representation and discovery"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-information-theory/b-protein-dna-binding-information-theoretic-specificity.yaml"
    },
    {
      "id": "b-gnn-x-gene-regulatory-network-perturbation-priors",
      "title": "Graph neural network message passing bridges relational inductive biases and gene regulatory perturbation priors.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy (to be empirically validated): Message passing over learned gene graphs can act as a computational analogue to mechanistic regulatory propagation assumptions used in perturbation-response modeling.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-grn-gnn-priors-improve-perturbation-response-prediction"
      ],
      "communication_gap": "Domain operators prioritize interpretable reliability diagnostics, while ML work often prioritizes aggregate accuracy without deployment-grade uncertainty audits.",
      "translation_table": [
        {
          "field_a_term": "model prior",
          "field_b_term": "domain prior",
          "note": "Both constrain inference in data-sparse regimes."
        },
        {
          "field_a_term": "uncertainty estimate",
          "field_b_term": "risk-aware decision support",
          "note": "Uncertainty quality determines practical utility."
        },
        {
          "field_a_term": "out-of-distribution behavior",
          "field_b_term": "deployment robustness",
          "note": "Shift sensitivity governs real-world reliability."
        }
      ],
      "references": [
        {
          "arxiv": "2005.03675",
          "note": "Graph neural network survey context."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/biology-machine-learning/b-gnn-x-gene-regulatory-network-perturbation-priors.yaml"
    },
    {
      "id": "b-cell-division-x-branching-process",
      "title": "Cell division ↔ Branching process — tumor growth as Galton-Watson process",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Tumor clonal evolution is a Galton-Watson branching process where each cancer cell independently divides, dies, or differentiates with fixed probabilities; extinction probability (tumor elimination), survival probability (progression), and clone size distribution are all exactly computable from bran",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-cell-division-x-branching-process"
      ],
      "communication_gap": "Galton-Watson processes were developed by Victorian mathematicians (Galton 1873) to study family name extinction; modern branching process theory is a probability textbook topic. Cancer biologists developed the clonal evolution theory (Nowell 1976) using mutation accumulation language without reference to branching processes. The connection was made rigorously by Nowak et al. (2002) and Michor et al. (2004), but is still not standard in cancer biology textbooks.",
      "translation_table": [
        {
          "field_a_term": "cancer cell division (produces two daughter cells)",
          "field_b_term": "reproduction event in Galton-Watson process (one individual → k offspring)",
          "note": "Division probability p_d ≡ P(offspring = 2); death probability p_0 ≡ P(offspring = 0)"
        },
        {
          "field_a_term": "tumour extinction (all cancer cells die before establishing)",
          "field_b_term": "Galton-Watson extinction (probability q = smallest fixed point of PGF)",
          "note": "Extinction probability q satisfies q = G(q) where G is the offspring PGF; q < 1 iff E[offspring] > 1"
        },
        {
          "field_a_term": "clonal dynamics (mutant cell founder effect in tumour)",
          "field_b_term": "family size distribution in supercritical branching process",
          "note": "Clone size distribution follows power law at criticality (E[offspring] = 1)"
        },
        {
          "field_a_term": "driver mutation fitness advantage s (increases net division rate)",
          "field_b_term": "supercritical branching parameter m = E[offspring] = 1 + s",
          "note": "Establishment probability of driver clone ≈ 2s for small s (Fisher-Haldane result)"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.1010978107",
          "note": "Tomasetti et al. (2013) — cancer evolution as branching process; PNAS"
        },
        {
          "doi": "10.1126/science.1235122",
          "note": "Tomasetti & Vogelstein (2015) — variation in cancer risk via branching process theory; Science"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-math/b-cell-division-x-branching-process.yaml"
    },
    {
      "id": "b-developmental-gradient-x-pde",
      "title": "Developmental gradients x Reaction-diffusion PDE — morphogen as chemical wave\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Turing's reaction-diffusion mechanism (1952) generates spatial patterns in morphogen concentration gradients that specify body axis patterning in embryos; stripe width, spot size, and axis polarity are determined by the ratio of diffusion coefficients and reaction rates — a purely mathematical predi",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Developmental biologists working on morphogenesis and applied mathematicians studying PDEs share the Turing framework but the experimental confirmation of Turing patterns in vertebrate development came 60 years after the 1952 prediction, partly due to disciplinary siloing.\n",
      "translation_table": [
        {
          "field_a_term": "morphogen gradient (developmental biology)",
          "field_b_term": "activator-inhibitor concentration field (mathematics)",
          "note": "The morphogen acts as an activator whose local production is balanced by a faster-diffusing inhibitor, producing stable spatial patterns"
        },
        {
          "field_a_term": "body axis specification (biology)",
          "field_b_term": "symmetry breaking in reaction-diffusion PDE (mathematics)",
          "note": "A homogeneous steady state becomes unstable to spatial perturbations (Turing instability) at a critical diffusion ratio, selecting a preferred wavelength"
        },
        {
          "field_a_term": "stripe or spot pattern wavelength (biology)",
          "field_b_term": "dominant unstable mode of linearized PDE (mathematics)",
          "note": "Pattern scale is set by sqrt(D_inhibitor/D_activator) times reaction rate constants — a purely mathematical prediction"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rstb.1952.0012",
          "note": "Turing (1952) - The chemical basis of morphogenesis; Phil Trans R Soc B 237:37"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-math/b-developmental-gradient-x-pde.yaml"
    },
    {
      "id": "b-ecological-succession-x-markov",
      "title": "Ecological Succession x Markov Chains — community assembly as transition matrix\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Ecological succession (community change over time after disturbance) is modeled as a Markov chain where states are community types and transition probabilities depend only on current composition; the stationary distribution of the chain predicts the climax community, and mixing time predicts recover",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Succession ecologists developed empirical community transition matrices in the 1970s (Usher 1979, Horn 1975) without using the full Markov chain mathematical framework (ergodicity, spectral gap, mixing time); mathematical Markov chain theory could provide quantitative recovery time predictions and robustness analyses that ecological field studies have not systematically exploited.\n",
      "translation_table": [
        {
          "field_a_term": "Seral community stages (pioneer → climax)",
          "field_b_term": "Markov chain states",
          "note": "Each successional community type (bare rock, pioneer herbs, shrubs, climax forest) maps to a state; the transition probability matrix encodes empirically measured probabilities of community change over a fixed time step.\n"
        },
        {
          "field_a_term": "Climax community (stable endpoint)",
          "field_b_term": "Stationary distribution of Markov chain",
          "note": "The stationary distribution π gives the long-run frequency of each community type; if the chain is ergodic (all states communicate), π is unique and equals the climax community composition.\n"
        },
        {
          "field_a_term": "Succession timescale (decades to centuries)",
          "field_b_term": "Markov chain mixing time (convergence to stationarity)",
          "note": "The mixing time bounds how long succession takes; it is determined by the spectral gap (1 - λ₂) of the transition matrix — large spectral gap → fast succession, small gap → slow recovery from disturbance.\n"
        },
        {
          "field_a_term": "Disturbance-reset (fire, logging, storm)",
          "field_b_term": "Return to initial Markov chain state",
          "note": "Disturbance resets the community to an early successional state; the expected time to return to the stationary distribution (mixing time) is the ecological recovery time.\n"
        }
      ],
      "references": [
        {
          "doi": "10.2307/1936612",
          "note": "Horn (1975) — Markovian properties of forest succession; Ecology 56:1401"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-math/b-ecological-succession-x-markov.yaml"
    },
    {
      "id": "b-ecology-x-coexistence-theory",
      "title": "Ecological coexistence ↔ Modern coexistence theory — storage effect as temporal niche",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Modern coexistence theory (Chesson 2000) partitions species coexistence mechanisms into stabilising (niche differences) and equalising (fitness similarity) components; the storage effect (temporal fluctuation buffering via overlapping generations) is a stabilising mechanism that allows indefinite co",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ecology-x-coexistence-theory"
      ],
      "communication_gap": "Modern coexistence theory (Chesson 2000) is a theoretical ecology framework developed in mathematical ecology journals (Theoretical Population Biology, American Naturalist). Community ecologists who study species diversity in the field rarely read or apply MCT's mathematical decomposition. The storage effect mechanism was proposed in 1982 but is only now being tested empirically. Applied ecology and agriculture have been particularly slow to adopt the MCT framework for diversity management.",
      "translation_table": [
        {
          "field_a_term": "stabilising niche difference (Δi) in MCT",
          "field_b_term": "competitive exclusion avoidance via resource partitioning",
          "note": "Δi > 0 prevents competitive exclusion; measured as the covariance of environment and competition"
        },
        {
          "field_a_term": "equalising fitness difference (ηi) in MCT",
          "field_b_term": "intrinsic growth rate difference between competitors at equal density",
          "note": "ηi measures competitive inequality; large ηi requires large Δi for coexistence"
        },
        {
          "field_a_term": "storage effect (covariance of environment and competition, buffered reproduction)",
          "field_b_term": "temporal Jensen's inequality exploitation via life history buffering",
          "note": "Long-lived stages (seeds, dormancy) allow species to 'bank' good years against bad ones"
        },
        {
          "field_a_term": "invasion criterion (can a rare species increase when common species present?)",
          "field_b_term": "Lyapunov stability criterion for multispecies dynamical system",
          "note": "Coexistence iff all species have positive invasion growth rates; equivalent to mutual invasibility"
        }
      ],
      "references": [
        {
          "doi": "10.1146/annurev.ecolsys.31.1.343",
          "note": "Chesson (2000) — mechanisms of maintenance of species diversity; Annu Rev Ecol Syst 31:343"
        },
        {
          "doi": "10.1016/0040-5809(82)90040-5",
          "note": "Chesson & Warner (1981) — environmental variability promotes coexistence in lottery competitive systems"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-math/b-ecology-x-coexistence-theory.yaml"
    },
    {
      "id": "b-epigenetic-landscape-x-attractor",
      "title": "Waddington's epigenetic landscape x Dynamical attractor - cell fate as basin of attraction\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Waddington's metaphorical epigenetic landscape (1957) is formalized as a dynamical system where cell types are stable point attractors of the gene regulatory network (GRN); cellular differentiation is a bifurcation (attractor loss via pitchfork bifurcation as transcription factor concentrations cros",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Developmental biologists interpreting Waddington's landscape metaphorically and dynamical systems theorists studying attractor bifurcations developed independent frameworks; Huang et al. (2005) and Kauffman (1969) formalized the connection, but the quantitative derivation of the Waddington landscape from measured GRN parameters and the prediction of reprogramming efficiency from attractor barrier heights remain active research challenges.\n",
      "translation_table": [
        {
          "field_a_term": "differentiated cell type (stem cell, neuron, cardiomyocyte) (developmental biology)",
          "field_b_term": "stable fixed point attractor of gene regulatory network ODE (dynamical systems)",
          "note": "Each cell type corresponds to a distinct attractor; the GRN is the vector field and transcription factor concentrations are state variables"
        },
        {
          "field_a_term": "developmental trajectory from progenitor to differentiated cell (developmental biology)",
          "field_b_term": "trajectory approaching attractor basin of differentiated state (dynamical systems)",
          "note": "Development follows the Waddington valley = attractor basin; cell fate commitment = entering basin of attraction"
        },
        {
          "field_a_term": "Yamanaka reprogramming to iPSCs (dedifferentiation) (cell biology)",
          "field_b_term": "noise-driven escape from attractor over potential barrier (stochastic dynamical systems)",
          "note": "Reprogramming factors (Oct4, Sox2, Klf4, cMyc) pertub the GRN to cross the Waddington potential barrier between attractors"
        },
        {
          "field_a_term": "lineage bifurcation at developmental decision point (developmental biology)",
          "field_b_term": "pitchfork bifurcation as parameter crosses critical value (dynamical systems)",
          "note": "Lineage commitment is a bifurcation where one attractor splits into two; the bifurcation parameter is a morphogen concentration"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1167609",
          "note": "Huang et al. (2005/2009) - Cancer attractors: a systems biology view of tumors; Seminars Cell Dev Biol — attractor model of cell fate"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-math/b-epigenetic-landscape-x-attractor.yaml"
    },
    {
      "id": "b-game-theory-x-antibiotic-resistance",
      "title": "Game Theory x Antibiotic Resistance - evolutionary game dynamics of resistance\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Antibiotic resistance evolution in polymicrobial communities is a multi-player evolutionary game: resistant cells pay a fitness cost but provide a public good (beta-lactamase secretion) to sensitive cells in a producer-cheater dynamic; spatial structure converts this tragedy of the commons into a sn",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Evolutionary game theory and antimicrobial resistance are both active fields but exchange has been limited; Gore, van Oudenaarden and collaborators (2009) explicitly formalized resistance evolution as a public goods game, but most clinical resistance management ignores game-theoretic predictions.\n",
      "translation_table": [
        {
          "field_a_term": "Resistant bacterium (beta-lactamase producer)",
          "field_b_term": "Cooperator in public goods game",
          "note": "Resistant bacteria pay a metabolic cost (expressing beta-lactamase) but secrete the enzyme extracellularly, destroying antibiotic for all nearby cells - a public good; sensitive cells free-ride by benefiting without paying the resistance cost.\n"
        },
        {
          "field_a_term": "Sensitive bacterium (non-producer)",
          "field_b_term": "Defector / free-rider in public goods game",
          "note": "Sensitive bacteria avoid the metabolic cost of resistance but benefit from antibiotic destruction by nearby resistant cells; in mixed culture without antibiotics, sensitives outcompete resistants (defectors win).\n"
        },
        {
          "field_a_term": "Antibiotic concentration gradient",
          "field_b_term": "Game payoff matrix parameter",
          "note": "The antibiotic concentration determines the benefit of beta-lactamase (high concentration = high public good value) and thus shifts the payoff matrix from a prisoner's dilemma (no coexistence) to a snowdrift game (stable coexistence) as concentration increases.\n"
        },
        {
          "field_a_term": "Spatial structure (biofilm, agar plate)",
          "field_b_term": "Lattice game model (spatial prisoner's dilemma)",
          "note": "Spatial structure (limited mixing) favors cooperators (resistants) by allowing them to cluster and receive benefit from neighbors; this converts the tragedy of the commons into a spatial snowdrift game with higher cooperator frequencies at Nash equilibrium.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature07921",
          "note": "Gore, Youk & van Oudenaarden (2009) - snowdrift game dynamics and facultative cheating in yeast; Nature 459:253"
        },
        {
          "doi": "10.1038/msb.2013.39",
          "note": "Yurtsev et al. (2013) - bacterial cheating drives antibiotic resistance; Mol Syst Biol"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-math/b-game-theory-x-antibiotic-resistance.yaml"
    },
    {
      "id": "b-gut-microbiome-x-lotka-volterra",
      "title": "Microbial Ecology x Lotka-Volterra — gut microbiome as generalized competitive system\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The gut microbiome's species abundance dynamics are quantitatively modeled by generalized Lotka-Volterra equations with interaction matrices inferred from time-series data; stable coexistence corresponds to a feasible equilibrium with all positive species abundances, and dysbiosis is a bifurcation t",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Microbiology developed culture-based and 16S sequencing approaches for characterizing the microbiome without quantitative dynamical models; theoretical ecology had gLV frameworks for decades but lacked microbiome time-series data; the integration accelerated with the Human Microbiome Project (2012) providing the necessary datasets.\n",
      "translation_table": [
        {
          "field_a_term": "Microbial species abundance x_i",
          "field_b_term": "Population variable in gLV system",
          "note": "Each species abundance obeys dx_i/dt = x_i(r_i + Σ_j A_ij x_j), where r_i is intrinsic growth rate and A_ij are interaction coefficients (competition, mutualism, predation) inferred from longitudinal 16S rRNA data.\n"
        },
        {
          "field_a_term": "Microbiome interaction matrix A_ij",
          "field_b_term": "Community matrix in ecological stability theory",
          "note": "The eigenspectrum of A determines community stability; random matrix theory (May 1972) predicts a stability-complexity trade-off: highly connected microbiomes are less stable, consistent with observed dysbiosis susceptibility.\n"
        },
        {
          "field_a_term": "Dysbiosis (loss of healthy microbiome)",
          "field_b_term": "Bifurcation to alternative attractor",
          "note": "The healthy microbiome and dysbiotic state are distinct stable equilibria of the gLV system; transitions between them (e.g., antibiotic perturbation) are saddle-node bifurcations with hysteresis — explaining why dysbiosis is difficult to reverse.\n"
        },
        {
          "field_a_term": "Fecal microbiota transplant (FMT) efficacy",
          "field_b_term": "Basin of attraction size for healthy equilibrium",
          "note": "FMT works by pushing the system from the dysbiotic basin of attraction into the healthy basin; ecological theory predicts which perturbation magnitudes and compositions are sufficient.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1371/journal.pcbi.1003388",
          "note": "Stein et al. (2013) — Ecological modeling from time-series inference; PLOS Comput Biol 9:e1003388"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-math/b-gut-microbiome-x-lotka-volterra.yaml"
    },
    {
      "id": "b-neutral-theory-x-stochastic-sampling",
      "title": "Neutral theory ↔ Stochastic sampling — biodiversity as random drift",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Hubbell's unified neutral theory of biodiversity (2001) treats all species as ecologically equivalent, with diversity maintained by stochastic birth-death-immigration; the species abundance distribution follows a log-series or Poisson-Dirichlet distribution derivable from Ewens' sampling formula in ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-neutral-theory-x-stochastic-sampling"
      ],
      "communication_gap": "Hubbell's neutral theory (2001) was explicitly inspired by neutral population genetics (Kimura 1968, Ewens 1972), but the mathematical equivalence (Poisson-Dirichlet distributions) was only fully formalised by Etienne & Olff (2004). Mainstream ecologists studying competition and niche theory rejected neutral theory as biologically unrealistic, while statisticians and population geneticists who understood the Ewens sampling formula rarely engaged with community ecology.",
      "translation_table": [
        {
          "field_a_term": "species abundance distribution (SAD) in neutral theory",
          "field_b_term": "allele frequency distribution in neutral population genetics (Ewens formula)",
          "note": "Ewens' sampling formula gives the exact distribution of allele frequencies in neutral Kingman coalescent; maps directly to SAD"
        },
        {
          "field_a_term": "fundamental biodiversity number θ = 2Jm ν (Hubbell's neutral parameter)",
          "field_b_term": "population mutation rate 4Nμ in Ewens sampling formula",
          "note": "θ sets the log-series slope; controls diversity analogously to population genetics mutation-drift balance"
        },
        {
          "field_a_term": "local community immigration rate m (from metacommunity)",
          "field_b_term": "migration rate m in island model of population genetics",
          "note": "Immigration determines local SAD deviation from metacommunity log-series; both follow Wright island model"
        },
        {
          "field_a_term": "speciation rate ν (probability of new species per birth event)",
          "field_b_term": "mutation rate μ in neutral allele model",
          "note": "Speciation in neutral theory is the ecological equivalent of mutation in population genetics"
        }
      ],
      "references": [
        {
          "doi": "10.1046/j.1461-0248.2003.00503.x",
          "note": "Hubbell (2001) — The Unified Neutral Theory of Biodiversity and Biogeography; Princeton UP"
        },
        {
          "doi": "10.1093/genetics/68.4.577",
          "note": "Ewens (1972) — the sampling theory of selectively neutral alleles; Theoretical Population Biology"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-math/b-neutral-theory-x-stochastic-sampling.yaml"
    },
    {
      "id": "b-phylogenetics-x-coalescent-theory",
      "title": "Phylogenetics x Coalescent theory — gene tree as reverse-time branching process\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Kingman's coalescent describes how ancestral lineages merge going backward in time in a population of size N; the coalescent rate (1/N per pair of lineages per generation) determines phylogenetic branch lengths and enables Bayesian molecular clock dating — connecting population genetics to the branc",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Evolutionary biologists studying phylogenetics and probabilists studying branching processes worked separately until Kingman's 1982 coalescent provided the rigorous mathematical foundation; Bayesian phylogenetic software (BEAST, MrBayes) brought the framework to empirical biology but the connection to general continuous-time Markov chain theory is rarely explicit.\n",
      "translation_table": [
        {
          "field_a_term": "phylogenetic tree branch length (evolutionary biology)",
          "field_b_term": "coalescent waiting time ~ Exp(C(k,2)/N) (probability theory)",
          "note": "The expected time for k lineages to coalesce to k-1 is N/(k(k-1)/2) generations; branch length encodes population size"
        },
        {
          "field_a_term": "most recent common ancestor (MRCA) (phylogenetics)",
          "field_b_term": "absorption state of coalescent Markov chain (probability theory)",
          "note": "The MRCA corresponds to coalescence of all lineages to one; the time to MRCA is the sum of all coalescent waiting times"
        },
        {
          "field_a_term": "population bottleneck (evolutionary biology)",
          "field_b_term": "reduction in N causing accelerated coalescent rate (probability theory)",
          "note": "Bottlenecks dramatically shorten branch lengths by increasing the coalescent rate, producing star-shaped phylogenies"
        },
        {
          "field_a_term": "recombination in population genetics (biology)",
          "field_b_term": "ancestral recombination graph (ARG) (mathematics)",
          "note": "Recombination extends the coalescent to the ARG, where lineages can split as well as merge going backward in time"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0304-4149(82)90011-4",
          "note": "Kingman (1982) - The coalescent; Stochastic Processes and their Applications 13:235"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-math/b-phylogenetics-x-coalescent-theory.yaml"
    },
    {
      "id": "b-population-genetics-x-random-matrix",
      "title": "Population genetics x Random matrix theory — allele covariance as Wishart ensemble\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The covariance matrix of allele frequencies across a neutrally evolving population follows the Marchenko-Pastur distribution of the Wishart random matrix ensemble; deviations from this null distribution identify loci under selection, providing a principled statistical test for selective sweeps.\n",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Population geneticists and random matrix theorists publish in entirely separate journals. The RMT null model for population structure was proposed by Patterson, Price & Reich (2006) but RMT tools are rarely imported into population genetics software packages.\n",
      "translation_table": [
        {
          "field_a_term": "Allele frequency covariance matrix (neutral evolution)",
          "field_b_term": "Wishart random matrix ensemble",
          "note": "Under drift alone, the sample covariance matrix of allele frequencies is a Wishart matrix; its eigenvalue spectrum follows the Marchenko-Pastur law.\n"
        },
        {
          "field_a_term": "Eigenvalue outliers (selected loci)",
          "field_b_term": "Spiked covariance matrix deviations from MP bulk",
          "note": "Loci under selection create eigenvalue outliers above the Marchenko-Pastur upper edge — exactly the signal RMT spike detection methods identify.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1371/journal.pgen.0020190",
          "note": "Patterson, Price & Reich (2006) — population structure and eigenanalysis; foundational RMT connection"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-math/b-population-genetics-x-random-matrix.yaml"
    },
    {
      "id": "b-protein-folding-x-energy-landscape",
      "title": "Protein folding x Energy landscape theory - funnel topology as folding code\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The protein folding problem is solved when the free energy landscape has a funnel topology directing all unfolded conformations toward the native state; frustration (conflicting interactions between residues) flattens the funnel and causes misfolding, kinetic traps, and aggregation — connecting prot",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Biochemists studying protein folding kinetics and physicists studying spin glasses and energy landscapes developed independent frameworks; the energy landscape theory of protein folding (Wolynes, Onuchic, Dill — 1990s) synthesized these, but the connection to AlphaFold2 (which implicitly uses funnel topology through attention mechanisms) has not been fully formalized, representing an outstanding bridge between theory and modern ML-based structure prediction.\n",
      "translation_table": [
        {
          "field_a_term": "protein native state (structural biology)",
          "field_b_term": "global free energy minimum / ground state of energy landscape (statistical mechanics)",
          "note": "The native fold is the ground state; Anfinsen's thermodynamic hypothesis states all information is in the sequence (landscape)"
        },
        {
          "field_a_term": "protein misfolding and aggregation (biochemistry)",
          "field_b_term": "kinetic trapping in local free energy minima / frustrated landscape (spin glass theory)",
          "note": "Frustration from competing interactions creates local minima that trap misfolded states — the spin glass analogy is exact"
        },
        {
          "field_a_term": "Levinthal's paradox — folding faster than random search (biophysics)",
          "field_b_term": "funnel landscape topology biasing search toward native state (statistical mechanics)",
          "note": "The funnel resolves Levinthal's paradox: the landscape is not random but biased toward the native state by evolution"
        },
        {
          "field_a_term": "phi-value analysis of folding transition state (biophysics)",
          "field_b_term": "saddle point / transition state geometry on energy landscape (physics)",
          "note": "Phi-values measure the fraction of native contacts at the transition state, mapping out the landscape saddle topology"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nsb0198-10",
          "note": "Dill & Chan (1997) - From Levinthal to pathways to funnels; Nature Struct Biol 4:10 — funnel topology review"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-math/b-protein-folding-x-energy-landscape.yaml"
    },
    {
      "id": "b-scale-free-network-x-metabolic",
      "title": "Scale-free networks x Metabolic networks - power-law hubs as metabolic bottlenecks\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Metabolic networks in all organisms exhibit scale-free topology (power-law degree distribution P(k) ~ k^-gamma with gamma ~ 2.2) because highly-connected metabolites (ATP, NADH, pyruvate, glutamate) were added earliest in evolutionary history via preferential attachment — the same mechanism generati",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Systems biologists reconstructing metabolic networks (Palsson 2000s) and network scientists developing scale-free network theory (Barabasi & Albert 1999) worked in parallel; Jeong et al. (2000) demonstrated the connection for metabolic networks, but the evolutionary mechanism (preferential attachment as a model for metabolic evolution) and its implications for drug targeting (attack hubs to fragment pathogen metabolism) remain underutilized in pharmaceutical research.\n",
      "translation_table": [
        {
          "field_a_term": "hub metabolite (ATP, NADH, pyruvate) with k >> <k> (systems biology)",
          "field_b_term": "hub node in scale-free network with degree k following power law (network science)",
          "note": "The most-connected metabolites are hubs; their removal fragments the metabolic network analogous to targeted attacks on Barabasi-Albert graphs"
        },
        {
          "field_a_term": "preferential attachment in metabolic evolution (evolutionary biology)",
          "field_b_term": "Barabasi-Albert preferential attachment mechanism for scale-free growth (network science)",
          "note": "New metabolic reactions preferentially connect to already-central metabolites, generating power-law degree distribution over evolutionary time"
        },
        {
          "field_a_term": "metabolic robustness to enzyme knockouts (systems biology)",
          "field_b_term": "robustness to random node removal in scale-free networks (network science)",
          "note": "Scale-free networks are robust to random failures but fragile to targeted hub attacks; metabolic networks show same asymmetry"
        },
        {
          "field_a_term": "lethal gene knockouts targeting hub enzymes (genetics)",
          "field_b_term": "targeted hub removal causing network percolation threshold fragmentation (network theory)",
          "note": "Essential genes disproportionately encode hub enzymes; lethality correlates with metabolic connectivity degree"
        }
      ],
      "references": [
        {
          "doi": "10.1038/35015116",
          "note": "Jeong et al. (2000) - The large-scale organization of metabolic networks; Nature 407:651"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-math/b-scale-free-network-x-metabolic.yaml"
    },
    {
      "id": "b-sir-model-x-compartmental-ode",
      "title": "Epidemic SIR Model x Compartmental ODE — infection as mass action kinetics\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The SIR epidemiological model uses mass-action kinetics (dI/dt = βSI - γI) identical to chemical reaction rate equations; the basic reproduction number R₀ = β/γ is both the epidemic threshold and the dominant eigenvalue of the next-generation matrix — unifying epidemiology with chemical kinetics and",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Epidemiologists Kermack and McKendrick (1927) derived the SIR model independently of chemical kinetics literature; the equivalence to mass-action reaction networks was noted only decades later, preventing cross-fertilization of methods (e.g., chemical master equation techniques for stochastic epidemics).\n",
      "translation_table": [
        {
          "field_a_term": "Susceptible-Infected-Recovered (SIR) compartments",
          "field_b_term": "Chemical species concentrations",
          "note": "Each compartment (S, I, R) maps to a chemical species; transitions between compartments are reactions with stoichiometric coefficients.\n"
        },
        {
          "field_a_term": "Transmission rate β (mass-action term βSI)",
          "field_b_term": "Bimolecular reaction rate constant",
          "note": "Mass-action kinetics assumes random mixing; βSI is identical in form to a bimolecular reaction A + B → products with rate constant β.\n"
        },
        {
          "field_a_term": "Basic reproduction number R₀ = β/γ",
          "field_b_term": "Dominant eigenvalue of next-generation matrix",
          "note": "R₀ > 1 implies epidemic growth, exactly as the spectral radius > 1 of the next-generation matrix determines instability of the disease-free equilibrium.\n"
        },
        {
          "field_a_term": "Epidemic threshold (R₀ = 1)",
          "field_b_term": "Bifurcation point of ODE system",
          "note": "At R₀ = 1 the disease-free equilibrium undergoes a transcritical bifurcation; this is the mathematical organizing center of epidemic behavior.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rspa.1927.0118",
          "note": "Kermack & McKendrick (1927) — A contribution to the mathematical theory of epidemics; Proc R Soc A 115:700"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-math/b-sir-model-x-compartmental-ode.yaml"
    },
    {
      "id": "b-synthetic-biology-x-circuit-design",
      "title": "Synthetic Biology x Electronic Circuit Design - gene circuits as logic gates\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Synthetic gene circuits implement Boolean logic (toggle switches, oscillators, band-pass filters) using the same design principles as electronic circuits; the repressilator (three-gene ring oscillator) is the biological equivalent of a ring oscillator, and synthetic bistable switches match the behav",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Electrical engineers developed rigorous CAD flows (SPICE simulation, formal verification, modular composition) over 60 years that synthetic biologists are only beginning to import; conversely, the stochastic and evolutionary robustness of gene circuits provides engineering insights absent from deterministic digital design.\n",
      "translation_table": [
        {
          "field_a_term": "Transcription factor repressor (TetR, LacI, cI)",
          "field_b_term": "NOT gate (inverter)",
          "note": "A repressor protein binding its operator prevents transcription of the downstream gene, implementing logical NOT; cascaded inverters form oscillators (repressilator) or bistable switches (toggle switch) just as in CMOS ring oscillators.\n"
        },
        {
          "field_a_term": "Repressilator (3-gene cyclic repressor network)",
          "field_b_term": "Ring oscillator (3-inverter chain with feedback)",
          "note": "Both use an odd number of inversions in a cycle to produce sustained oscillations; the period is determined by the delay around the loop (mRNA/protein half-lives vs gate propagation delay).\n"
        },
        {
          "field_a_term": "Bistable toggle switch (Gardner et al. 2000)",
          "field_b_term": "SR latch (cross-coupled NOR gates)",
          "note": "Mutual repression of two promoters produces two stable states with hysteresis, identical in logic structure to an SR latch; noise-driven switching corresponds to hold-time violations in digital circuits.\n"
        },
        {
          "field_a_term": "Ribosome binding site strength",
          "field_b_term": "Fan-out / drive strength",
          "note": "RBS strength determines translational efficiency and hence how strongly a gene drives downstream processes - the biological analogue of gate drive strength in digital electronics.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1038/35002131",
          "note": "Elowitz & Leibler (2000) - repressilator: a synthetic oscillatory network of transcriptional regulators; Nature 403:335"
        },
        {
          "doi": "10.1038/35002131",
          "note": "Gardner, Cantor & Collins (2000) - construction of a genetic toggle switch; Nature 403:339"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-math/b-synthetic-biology-x-circuit-design.yaml"
    },
    {
      "id": "b-allometric-scaling-metabolic-geometry",
      "title": "Allometric scaling laws (metabolic rate ∝ M^(3/4)) arise from the fractal geometry of space-filling resource-distribution networks, mathematically explained by the WBE model as an optimization of hierarchical branching geometry subject to energy-minimization constraints",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "West, Brown, and Enquist (1997) showed that quarter-power allometric scaling emerges from the fractal geometry of vascular and bronchial networks: given a volume-filling branching network with area-preserving junctions and minimized hydrodynamic resistance, metabolic rate B ∝ M^(3/4), because the ne",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Biologists measure allometric exponents empirically and debate their universality, while mathematicians and physicists study fractal network optimization; the WBE model bridges them but its assumptions (rigid branching rules, laminar flow) remain contested in ecology.",
      "translation_table": [
        {
          "field_a_term": "metabolic rate scaling exponent 3/4 (biology)",
          "field_b_term": "self-similar fractal network dimension D = 3 (mathematics)",
          "note": "The exponent 3/4 = D/(D+1) follows directly from the network's space-filling fractal geometry"
        },
        {
          "field_a_term": "hierarchical vascular branching (biology)",
          "field_b_term": "area-preserving fractal tree with self-similar scaling ratios (mathematics)",
          "note": "Each branching level obeys β^k = n_k * r_k^2 with scale ratio β derived from volume-filling constraint"
        },
        {
          "field_a_term": "mass-specific metabolic rate B/M ∝ M^(-1/4) (biology)",
          "field_b_term": "surface-to-volume scaling of a D=3 fractal (mathematics)",
          "note": "Decreasing metabolic rate per unit mass reflects the geometric efficiency of larger networks"
        },
        {
          "field_a_term": "quarter-power scaling of lifespan, heart rate, growth (biology)",
          "field_b_term": "universal scaling exponents from fractal network constraints (mathematics)",
          "note": "All quarter-power biology follows from the same geometric invariant"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.276.5309.122",
          "note": "West, Brown, Enquist (1997) Science - original WBE model deriving quarter-power scaling from fractal networks"
        },
        {
          "doi": "10.1038/nature00158",
          "note": "Brown et al. (2002) Nature - metabolic scaling theory applied across life"
        },
        {
          "doi": "10.1371/journal.pbio.1001467",
          "note": "Price et al. (2012) PLoS Biology - testing WBE predictions across plant taxa"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-mathematics/b-allometric-scaling-metabolic-geometry.yaml"
    },
    {
      "id": "b-blood-coagulation-cascade-boolean",
      "title": "Blood coagulation is a protease cascade with threshold-switch behavior: the positive feedback loop between thrombin and factor V/VIII generates all-or-none clot formation, modeled as a Boolean network with bistable attractor",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The coagulation cascade converts soluble fibrinogen to insoluble fibrin via sequential protease activation: TF-VIIa → Xa → IIa (thrombin) → fibrin clot. The cascade has two key positive feedback loops: (1) thrombin activates factor V (cofactor for Xa→IIa), amplifying thrombin production ~100-fold; (",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-blood-coagulation-cascade-boolean"
      ],
      "communication_gap": "Hematologists and intensivists who manage coagulation clinically have detailed knowledge of the cascade components but do not typically use bistability or Boolean network theory to understand threshold behavior and treatment logic. Mathematical biologists who publish coagulation ODE models are largely uncited in clinical hematology despite providing mechanistic predictions for factor replacement therapy thresholds.\n",
      "translation_table": [
        {
          "field_a_term": "thrombin burst (explosive autocatalytic generation)",
          "field_b_term": "supercritical bifurcation above threshold — escape from inhibited OFF state",
          "note": "Below threshold: TF-VIIa + AT-III kinetics are balanced; above threshold: positive feedback dominates"
        },
        {
          "field_a_term": "coagulation factor concentrations (plasma levels)",
          "field_b_term": "parameter space determining location of the bifurcation threshold",
          "note": "Hemophilia A/B: reduced factor VIII/IX shifts bifurcation threshold; partial clotting possible"
        },
        {
          "field_a_term": "anticoagulant proteins (TFPI, antithrombin, protein C)",
          "field_b_term": "inhibitor nodes in the Boolean network that set the OFF attractor",
          "note": "Loss of protein C (thrombophilia) enlarges the ON attractor basin"
        },
        {
          "field_a_term": "clotting time (time to threshold thrombin)",
          "field_b_term": "escape time from the unstable fixed point in the bistable ODE system",
          "note": "Clotting time reflects both stimulus intensity and proximity to bifurcation"
        }
      ],
      "references": [
        {
          "doi": "10.1182/blood-2003-05-1750",
          "note": "Monroe & Hoffman (2006) What does it take to make the perfect clot? Arterioscler Thromb Vasc Biol 26:41"
        },
        {
          "doi": "10.1074/jbc.M410813200",
          "note": "Kuharsky & Fogelson (2001) Surface-mediated control of blood coagulation: the role of binding site densities and platelet deposition. Biophys J 80:1050"
        },
        {
          "doi": "10.1007/s10439-009-9798-x",
          "note": "Luan & Bhatt (2007) Regulation of hemostasis — a thrombin generation analysis. Ann Biomed Eng 35:1487"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-mathematics/b-blood-coagulation-cascade-boolean.yaml"
    },
    {
      "id": "b-contact-map-sparsity-x-hessian-low-rank-folding-cooperativity",
      "title": "Native contact maps of proteins are sparse graphs; near-native basins of simplified energy models often exhibit low effective Hessian rank along cooperative contacts — graph sparsity ↔ curvature cooperativity in folding landscapes (structural biology ↔ numerical optimization geometry).\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Order-disorder transitions in folding networks concentrate curvature directions along subsets of contacts that become simultaneously satisfied — resembling low-rank Hessian structure in optimization where only a few eigenmodes dominate early convergence. This **speculative analogy** links contact-gr",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-low-rank-hessian-surrogate-predicts-two-state-phi-profile-class"
      ],
      "communication_gap": "Structural biologists publish contact maps as intuitive summaries; optimization theorists discuss Hessian rank without molecular coordinates — intermediate coarse-graining steps rarely standardized.\n",
      "translation_table": [
        {
          "field_a_term": "adjacency / contact map graph G",
          "field_b_term": "sparsity pattern of Hessian off-diagonal dominance in reduced coordinates",
          "note": "Depends on coordinate choice (Cartesian vs internal)."
        },
        {
          "field_a_term": "cooperative contact clusters (foldons)",
          "field_b_term": "eigenbundle associated with largest Hessian eigenvalues near transition states (for toy models)",
          "note": "Parallel is illustrative; experimental phi-values probe cooperativity differently."
        },
        {
          "field_a_term": "hub residues mediating many contacts",
          "field_b_term": "high leverage directions in sensitivity analysis / dominant Hessian vectors in ENM approximations",
          "note": "Elastic network models already bridge graphs to spectra — we emphasize optimization curvature reading."
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.0908149106",
          "note": "Das et al. (2009) — geometric coupling between folding pathways and native topology (PNAS)."
        },
        {
          "doi": "10.1021/jp407558m",
          "note": "Matysiak & Clementi (2006) — folding networks / folding pathways coarse modeling context (J. Phys. Chem. B)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-mathematics/b-contact-map-sparsity-x-hessian-low-rank-folding-cooperativity.yaml"
    },
    {
      "id": "b-cooperative-breeding-kin-selection-inclusive-fitness",
      "title": "Cooperative breeding - where non-breeding helpers assist raising relatives' offspring - is the paradigmatic test of Hamilton's inclusive fitness rule (rB > C): measured relatedness r, fitness benefits B, and costs C in avian cooperative breeders provide the strongest quantitative tests of Hamilton's rule as a mathematical prediction about natural selection.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Hamilton's (1964) rule states an altruistic allele spreads when rB > C, where r = probability of identity by descent (relatedness), B = fitness benefit to recipient, C = fitness cost to actor. Cooperative breeding provides clean test cases: helpers are typically first-degree relatives (r ~ 0.5 in bi",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Behavioural ecologists measure helper effects but rarely perform formal Price equation decomposition; population geneticists derive Hamilton's rule abstractly without applying it to field datasets. The quantitative test of rB > C in real populations requires both field ecology and mathematical population genetics.\n",
      "translation_table": [
        {
          "field_a_term": "relatedness coefficient r (mathematics)",
          "field_b_term": "genetic relatedness between helper and recipient offspring (evolutionary biology)",
          "note": "r measured by molecular markers; r ~ 0.5 for siblings; required for inclusive fitness calculation"
        },
        {
          "field_a_term": "Hamilton's rule rB > C (mathematics)",
          "field_b_term": "condition for helper allele to increase in frequency (evolutionary biology)",
          "note": "B = additional offspring raised per helper; C = lost direct reproductive success per helper"
        },
        {
          "field_a_term": "Price equation kin selection term (mathematics)",
          "field_b_term": "indirect fitness gain through effects on relatives (evolutionary biology)",
          "note": "Indirect component of selection = Cov(w_relatives, g) * r; adds to direct fitness"
        },
        {
          "field_a_term": "ecological constraints model (mathematics)",
          "field_b_term": "limited breeding opportunities making helping more profitable than dispersal (evolutionary biology)",
          "note": "Constraint raises C of independence; lowers effective C of helping; shifts rB > C balance"
        }
      ],
      "references": [
        {
          "doi": "10.1006/jtbi.1964.0039",
          "note": "Hamilton (1964) - the genetical evolution of social behaviour I and II"
        },
        {
          "doi": "10.1111/j.1365-2656.2009.01569.x",
          "note": "Cornwallis et al. (2010) - kinship, need, and the evolution of cooperation"
        },
        {
          "doi": "10.1038/nature09963",
          "note": "Lukas & Clutton-Brock (2012) - cooperative breeding and its consequences"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-mathematics/b-cooperative-breeding-kin-selection-inclusive-fitness.yaml"
    },
    {
      "id": "b-developmental-geometry-morphogenesis",
      "title": "Biological forms are transformations of each other under smooth coordinate deformations (diffeomorphisms) as proposed by D'Arcy Thompson; modern computational anatomy formalizes this as geodesics on the infinite-dimensional group Diff(M) with the same mathematical structure as ideal fluid mechanics, enabling quantitative comparison of biological shapes across evolution and development.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "D'Arcy Thompson's On Growth and Form (1917): biological forms are transformations of each other under continuous deformations (diffeomorphisms). Fish species' body shapes are related by smooth coordinate transformations — a topological/geometric relationship. Modern computational anatomy (Grenander ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-developmental-geometry-diffeomorphism-geodesic"
      ],
      "communication_gap": "D'Arcy Thompson's 1917 book was read by biologists and mathematicians but the rigorous formalization (Grenander, Miller 1980s-1990s) happened in the applied mathematics / computer vision community with limited biological uptake until neuroimaging adopted it for MRI brain shape analysis in the 2000s.\n",
      "translation_table": [
        {
          "field_a_term": "fish species body plan comparison (D'Arcy Thompson)",
          "field_b_term": "diffeomorphic image registration (computational anatomy)",
          "note": "both ask what smooth map deforms one shape into another"
        },
        {
          "field_a_term": "biological growth trajectory",
          "field_b_term": "geodesic on Diff(M) with H^1 Sobolev metric",
          "note": "minimizing total deformation energy over time corresponds to geodesic"
        },
        {
          "field_a_term": "evolutionary distance between species body plans",
          "field_b_term": "Riemannian distance on shape space Diff(M)/Diff(M)_0",
          "note": "geodesic distance in shape space as proxy for evolutionary relatedness"
        },
        {
          "field_a_term": "ideal fluid velocity field v(x,t)",
          "field_b_term": "diffeomorphic deformation field φ_t(x) = ∫v dt",
          "note": "Arnold (1966) showed Euler equations = geodesics on SDiff(M); same as LDDMM"
        }
      ],
      "references": [
        {
          "note": "Thompson (1917) On Growth and Form; Cambridge University Press"
        },
        {
          "doi": "10.5802/aif.233",
          "note": "Arnold (1966) Ann Inst Fourier 16:319 — geodesics on SDiff(M) = Euler equations"
        },
        {
          "doi": "10.1090/qam/926445",
          "note": "Grenander & Miller (1998) Q Appl Math 56:617 — diffeomorphic mapping framework"
        },
        {
          "doi": "10.1038/nrn644",
          "note": "Toga & Thompson (2001) Nat Rev Neurosci 2:37 — computational neuroanatomy"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-mathematics/b-developmental-geometry-morphogenesis.yaml"
    },
    {
      "id": "b-game-theory-honest-signaling",
      "title": "Zahavi's handicap principle (1975) — that honest signals must be costly to fake — is formalized by Maynard Smith's game-theoretic separating equilibrium, where the Spence-Mirrleesian single-crossing property guarantees that each quality level sends a unique costly signal, explaining peacock tails, stotting gazelles, and birdsong complexity as evolutionarily stable honest communication.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Amotz Zahavi's handicap principle (1975) proposed that honest signals must impose a cost that is harder to bear for low-quality individuals — otherwise cheaters would invade the population. This biological intuition was mathematically formalized by Maynard Smith (1991) and Grafen (1990) using game t",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-zahavi-handicap-single-crossing-stable-honest"
      ],
      "communication_gap": "Zahavi's 1975 paper was published in Journal of Theoretical Biology and initially received skeptically by evolutionary biologists, while Spence's independent 1973 formalization appeared in Quarterly Journal of Economics. The mathematical equivalence between labor market signaling and animal honest signaling was not widely recognized until Grafen (1990) in Journal of Theoretical Biology and Maynard Smith (1991) in Animal Behaviour. Spence shared the 2001 Nobel in Economics for the signaling work; Zahavi's biological version remains under- appreciated outside behavioral ecology. Economists and evolutionary biologists still rarely cite each other's literatures on signaling.\n",
      "translation_table": [
        {
          "field_a_term": "handicap principle (Zahavi 1975)",
          "field_b_term": "costly signaling / separating equilibrium (Spence-Mirrleees)",
          "note": "Biological honest signaling = economic signaling with single-crossing costs"
        },
        {
          "field_a_term": "single-crossing condition ∂²C/∂s∂q < 0",
          "field_b_term": "cost of the signal decreases with quality (differential cost)",
          "note": "The mathematical condition ensuring high-quality signalers prefer higher signals"
        },
        {
          "field_a_term": "ESS (evolutionarily stable strategy)",
          "field_b_term": "Nash equilibrium of the signaling game",
          "note": "An ESS is a Nash equilibrium that is stable to invasion by rare mutant strategies"
        },
        {
          "field_a_term": "peacock tail / stotting / birdsong (biological examples)",
          "field_b_term": "college education / luxury goods / advertising (economic examples)",
          "note": "The same mathematical structure appears in animal and human honest signaling"
        },
        {
          "field_a_term": "honest signal vs. cheater",
          "field_b_term": "separating vs. pooling equilibrium",
          "note": "In a pooling equilibrium, all types send the same signal — the cheating outcome Zahavi argued is unstable"
        },
        {
          "field_a_term": "Fisher's runaway selection",
          "field_b_term": "preference-signal coevolution (positive feedback, non-Zahavian)",
          "note": "Runaway selection vs. handicap principle are mathematically distinct mechanisms for ornament evolution"
        }
      ],
      "references": [
        {
          "note": "Zahavi (1975) — Mate selection: a selection for a handicap",
          "doi": "10.1016/0022-5193(75)90111-3"
        },
        {
          "note": "Maynard Smith (1991) — Honest signalling: the Philip Sidney game",
          "doi": "10.1016/S0003-3472(05)80683-5"
        },
        {
          "note": "Grafen (1990) — Biological signals as handicaps",
          "doi": "10.1016/S0022-5193(05)80088-8"
        },
        {
          "note": "Spence (1973) — Job market signaling",
          "doi": "10.2307/1882010"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-mathematics/b-game-theory-honest-signaling.yaml"
    },
    {
      "id": "b-game-theory-immune-evasion",
      "title": "Evolutionary game theory and immune evasion — host-pathogen arms races are co-evolutionary games whose dynamics follow replicator equations and ESS theory",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Pathogens and immune systems are engaged in a co-evolutionary arms race formally describable as a repeated evolutionary game. Pathogen antigenic variation = mixed strategy in the immune evasion game: randomizing surface antigens prevents adaptive immune recognition. Immune memory = commitment device",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-red-queen-ess-influenza-diversity-prediction"
      ],
      "communication_gap": "Evolutionary game theory was developed by Maynard Smith with biological applications in mind, but immunology adopted it slowly. Nowak & May (1994) applied replicator dynamics to viral diversity but the framework remained niche. Immunologists primarily use empirical approaches rather than game-theoretic models. The mathematics of frequency-dependent selection and ESS is standard in population genetics but infrequently taught in immunology graduate programs.\n",
      "translation_table": [
        {
          "field_a_term": "pathogen strain frequency x_i",
          "field_b_term": "strategy frequency in evolutionary game",
          "note": "Replicator equation governs both; selection ∝ payoff difference from mean"
        },
        {
          "field_a_term": "antigenic variation (surface protein diversity)",
          "field_b_term": "mixed strategy (randomized action choice)",
          "note": "Pathogen mixes antigens to prevent single dominant immune response"
        },
        {
          "field_a_term": "immune memory (long-lived lymphocytes)",
          "field_b_term": "memory / commitment device in strategic interaction",
          "note": "Past exposure changes payoff matrix for future encounters"
        },
        {
          "field_a_term": "Red Queen co-evolution (arms race dynamics)",
          "field_b_term": "zero-sum repeated game with frequency-dependent payoffs",
          "note": "No ESS in zero-sum game → perpetual cycling; models influenza antigenic drift"
        },
        {
          "field_a_term": "escape mutation (new variant evading immunity)",
          "field_b_term": "strategy deviation testing Nash equilibrium stability",
          "note": "ESS condition: mutant strain cannot invade; immune escape = invading mutant strategy"
        },
        {
          "field_a_term": "original antigenic sin (immunodominance of first exposure)",
          "field_b_term": "prior commitment constraining future strategy space",
          "note": "First exposure narrows subsequent immune response — strategic path dependence"
        }
      ],
      "references": [
        {
          "note": "Van Valen (1973) Evol Theory 1:1 — Red Queen hypothesis of co-evolutionary arms races"
        },
        {
          "doi": "10.1126/science.7754375",
          "note": "Nowak & May (1994) Science 265:1084 — superinfection and the evolution of parasite virulence"
        },
        {
          "note": "Frank (2002) Immunology and Evolution of Infectious Disease (Princeton UP, ISBN 069102607X) — game-theoretic immunology"
        },
        {
          "doi": "10.1006/jtbi.2000.2032",
          "note": "McLean & Bostock (2000) J Theor Biol — evolutionary game theory of immune responses"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-mathematics/b-game-theory-immune-evasion.yaml"
    },
    {
      "id": "b-intestinal-crypt-stem-cell-moran-process",
      "title": "Intestinal crypt stem cell competition is a Moran process: a fixed-size pool of stem cells undergoes neutral drift where clones expand and contract stochastically until monoclonality, with fixation probability and time determined by the mathematical theory of finite Moran populations.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Moran process models a fixed population of N individuals where, at each step, one individual reproduces and one dies - reproduction is proportional to fitness. For neutral mutations, fixation probability is exactly 1/N and mean fixation time is N*(N-1) cell generations. The intestinal crypt cont",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Cell biologists study stem cell competition without applying Moran process mathematics; population geneticists study neutral drift without working with intestinal organoids. The quantitative confirmation of Moran predictions in intestinal crypts is one of the best-validated examples of neutral evolution at the cellular scale.\n",
      "translation_table": [
        {
          "field_a_term": "Moran process population size N (mathematics)",
          "field_b_term": "number of functional stem cells per crypt (biology)",
          "note": "N inferred from fixation time distribution; N ~ 5 in mouse small intestine"
        },
        {
          "field_a_term": "fixation probability 1/N (mathematics)",
          "field_b_term": "probability a single mutant stem cell takes over the crypt (biology)",
          "note": "Neutral mutant (e.g., reporter-positive cell) fixes with probability 1/N"
        },
        {
          "field_a_term": "mean fixation time ~ N^2 cell generations (mathematics)",
          "field_b_term": "time for crypt to become monoclonal after labelling (biology)",
          "note": "Mouse small intestine crypts become monoclonal in ~weeks; consistent with N ~ 5"
        },
        {
          "field_a_term": "selection coefficient s (mathematics)",
          "field_b_term": "fitness advantage of oncogenic stem cell mutation (biology)",
          "note": "Even small s > 0 accelerates fixation; explains early cancer clone expansion"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature08709",
          "note": "Lopez-Garcia et al. (2010) - intestinal stem cell replacement follows a neutral drift process"
        },
        {
          "doi": "10.1038/nature08733",
          "note": "Snippert et al. (2010) - intestinal crypt homeostasis results from neutral competition between symmetrically dividing Lgr5 stem cells"
        },
        {
          "doi": "10.1371/journal.pbio.0040030",
          "note": "Williams et al. (2010) - neutral drift in human colon crypts"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-mathematics/b-intestinal-crypt-stem-cell-moran-process.yaml"
    },
    {
      "id": "b-invasion-biology-spreading-speeds",
      "title": "Fisher's reaction-diffusion equation and the Kolmogorov-Petrovsky-Piskunov theorem set the asymptotic spreading speed c* = 2√(rD) for invasive species, while integrodifference equations with fat-tailed dispersal kernels predict accelerating invasions — unifying mathematical wave propagation theory with invasion biology.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The spread of invasive species is governed by the same mathematics as reaction- diffusion traveling waves. Fisher (1937) and Kolmogorov-Petrovsky-Piskunov (KPP, 1937) independently showed that the equation ∂N/∂t = rN(1-N/K) + D∇²N — combining logistic population growth with Fickian diffusion — admit",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-rlde-satellite-colony-invasion-acceleration-branching-process"
      ],
      "communication_gap": "Mathematical PDE theory of traveling waves (published in Ann Math, J Diff Eq, SIAM J Math Anal) uses functional analysis and existence proofs inaccessible to most ecologists. Invasion biologists publish in Ecology, Biological Invasions, and Global Change Biology, where mathematical rigor is less central. The KPP theorem (1937, in Russian in Bulletin of Moscow State University) took decades to enter Western ecology literature. Fat-tailed dispersal and IDE theory were not widely known to ecologists until Kot et al. (1996) — despite the mathematics being known to probabilists for decades. Empiricists measuring invasion fronts rarely have access to the statistical tools needed to distinguish constant-speed from accelerating invasions.\n",
      "translation_table": [
        {
          "field_a_term": "reaction rate r (per capita growth rate at low density)",
          "field_b_term": "population growth parameter in logistic equation"
        },
        {
          "field_a_term": "diffusion constant D (mean squared displacement per unit time / 2d)",
          "field_b_term": "local dispersal intensity of organism per generation"
        },
        {
          "field_a_term": "minimum wave speed c* = 2√(rD)",
          "field_b_term": "asymptotic invasion front velocity observable in field surveys"
        },
        {
          "field_a_term": "dispersal kernel k(x) (probability distribution of displacement)",
          "field_b_term": "wave packet shape in dispersive wave equation"
        },
        {
          "field_a_term": "fat-tailed kernel (power-law, Cauchy) → divergent M_k(s)",
          "field_b_term": "superdiffusive process → accelerating spread (no constant speed)"
        },
        {
          "field_a_term": "pulled wave (speed set by leading edge dynamics)",
          "field_b_term": "invasive spread controlled by low-density growth at front"
        },
        {
          "field_a_term": "pushed wave (speed set by interior dynamics)",
          "field_b_term": "Allee effect — invasion stalls unless population exceeds threshold"
        }
      ],
      "references": [
        {
          "doi": "10.1111/j.1469-1809.1937.tb02153.x",
          "note": "Fisher (1937) Ann Eugen 7:355 — the wave of advance of advantageous genes (original spreading-speed derivation)"
        },
        {
          "note": "Kolmogorov, Petrovsky & Piskunov (1937) Bull Univ Moscow 1:1 — étude de l'équation de la diffusion avec croissance de la quantité de matière et son application à un problème biologique"
        },
        {
          "doi": "10.2307/2265698",
          "note": "Kot et al. (1996) Ecology 77:2027 — dispersal data and the spread of invading organisms"
        },
        {
          "note": "Shigesada & Kawasaki (1997) Biological Invasions: Theory and Practice — Oxford University Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-mathematics/b-invasion-biology-spreading-speeds.yaml"
    },
    {
      "id": "b-metabolic-control-analysis-x-local-sensitivity",
      "title": "Metabolic control analysis (MCA) defines flux control coefficients C^J_i = (∂ln|J|/∂ln p_i) as logarithmic sensitivities of steady-state pathway fluxes to enzyme activities — structurally identical to normalized Jacobian sensitivities and elasticity coefficients in nonlinear dynamical systems theory applied to biochemical networks.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "MCA summarizes how small parameter perturbations around steady states propagate to fluxes — directly analogous to sensitivity analysis of steady solutions of ODEs dx/dt = f(x,p) where ∂x/∂p solves an adjoint linearization. Summation and connectivity theorems constrain coefficient networks — mirrorin",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-metabolic-control-analysis-x-local-sensitivity"
      ],
      "communication_gap": "Biochemical MCA literature uses enzyme-centric notation uncommon in math departments; conversely pure sensitivity-analysis textbooks rarely cite connectivity theorems discovered in metabolic control papers.\n",
      "translation_table": [
        {
          "field_a_term": "Flux control coefficient C^J_i (MCA)",
          "field_b_term": "∂ln J / ∂ln v_i steady-state sensitivity (implicit differentiation)",
          "note": "Matches logarithmic derivatives common in uncertainty quantification of kinetic models."
        },
        {
          "field_a_term": "Elasticity ε^v_j = ∂ln v / ∂ln s_j",
          "field_b_term": "Partial derivative of rate law w.r.t. substrate in Jacobian blocks",
          "note": "Same local linearization objects assembled into scaled Jacobian matrices."
        },
        {
          "field_a_term": "Summation theorem Σ_i C^J_i = 1 (for simple chains under stated assumptions)",
          "field_b_term": "Matrix identities linking rows of sensitivity matrices to conserved stoichiometric subspaces",
          "note": "Structural constraints analogous to rank-nullspace projections in mass-balance ODEs."
        }
      ],
      "references": [
        {
          "doi": "10.1016/0076-6879(86)13086-1",
          "note": "Fell, Sauro & Small (1986) Methods in Enzymology — control analysis methods and matrix formulation"
        },
        {
          "doi": "10.1016/0301-4622(95)90002-5",
          "note": "Fell (1995) J. Theor. Biol. — review of metabolic control in pathway context"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-mathematics/b-metabolic-control-analysis-x-local-sensitivity.yaml"
    },
    {
      "id": "b-morphogen-turing-patterning",
      "title": "Turing's reaction-diffusion mechanism explains how uniform morphogen distributions spontaneously break symmetry to generate periodic spatial patterns when an activator diffuses slower than its inhibitor, with pattern wavelength lambda = 2*pi * sqrt(D_u/sigma) set by diffusion coefficients",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "In a two-component reaction-diffusion system du/dt = D_u * nabla^2 u + f(u,v), dv/dt = D_v * nabla^2 v + g(u,v), a homogeneous steady state that is stable to uniform perturbations becomes unstable to spatial perturbations when D_v/D_u > gamma (Turing instability condition), generating self-organized",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Developmental biologists identify molecular components of patterning while mathematicians analyze RD equations; connecting measured morphogen diffusion coefficients and reaction rates to observed pattern wavelengths via the Turing dispersion relation is rarely done quantitatively in developmental biology papers.",
      "translation_table": [
        {
          "field_a_term": "activator-inhibitor short-range activation / long-range inhibition",
          "field_b_term": "Turing instability condition: D_v >> D_u and f_u > 0, g_v < 0",
          "note": "Pattern formation requires activator to be locally autocatalytic (f_u > 0) and inhibited by faster-diffusing inhibitor"
        },
        {
          "field_a_term": "pattern wavelength in digit/stripe patterning",
          "field_b_term": "lambda = 2*pi / k_max where k_max maximizes Re(sigma(k))",
          "note": "k_max from dispersion relation of linearized RD system; observed stripe spacing ~ lambda"
        },
        {
          "field_a_term": "morphogen gradient Bicoid/Nodal",
          "field_b_term": "source term or spatially varying parameter in RD equations",
          "note": "Pre-patterning gradients modulate local RD parameters, selecting which Turing mode is expressed"
        },
        {
          "field_a_term": "finger/digit spacing in limb development",
          "field_b_term": "Turing wavelength controlled by BMP/Wnt activator-inhibitor pair",
          "note": "Shyer et al. (2018) demonstrated BMP-Wnt RD mechanism in chick digit patterning"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rstb.1952.0012",
          "note": "Turing (1952) - The chemical basis of morphogenesis: original RD theory paper"
        },
        {
          "doi": "10.1126/science.aai7830",
          "note": "Shyer et al. (2017) Science - Turing mechanism for digit patterning via BMP-WNT"
        },
        {
          "doi": "10.1038/nrm2763",
          "note": "Kondo & Miura (2010) Nat Rev Mol Cell Biol - reaction-diffusion model as a framework for understanding biological pattern formation"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-mathematics/b-morphogen-turing-patterning.yaml"
    },
    {
      "id": "b-phylogenetics-maximum-likelihood",
      "title": "Phylogenetic tree inference is maximum likelihood estimation over a combinatorial parameter space of tree topologies and branch lengths under Markov nucleotide substitution models — Felsenstein's pruning algorithm makes the likelihood tractable, and Bayesian MCMC extensions unify evolutionary biology with probabilistic graphical models and molecular clocks.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Phylogenetics is a formally defined statistical inference problem: given aligned DNA (or protein) sequences from n taxa, find the evolutionary tree topology τ and branch lengths t that maximise the probability of observing the data:\n  L(τ, t | data) = P(data | τ, t, model)\nThe model is a continuous-",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-gtr-model-adequate-metazoan-divergence-estimation"
      ],
      "communication_gap": "Felsenstein (1981) published in J Mol Evol, a specialist journal inaccessible to most mathematicians and statisticians. The equivalence between phylogenetic pruning and the forward algorithm for HMMs was not noted until the late 1990s, causing independent development of computational methods in both fields. Bayesian phylogenetics (MrBayes 2001) was initially resisted by some systematists sceptical of Bayesian methods — a philosophical dispute from statistics that delayed adoption. The BEAST software (Drummond & Rambaut 2007) democratised Bayesian molecular-clock analysis but is still regarded as a specialised bioinformatics tool rather than a Bayesian graphical model by the statistics community.\n",
      "translation_table": [
        {
          "field_a_term": "evolutionary tree topology τ",
          "field_b_term": "combinatorial latent variable in a probabilistic graphical model",
          "note": "the tree is a rooted or unrooted graph; posterior over τ is MCMC-sampled"
        },
        {
          "field_a_term": "nucleotide substitution rate matrix Q",
          "field_b_term": "instantaneous generator of continuous-time Markov chain",
          "note": "transition probabilities P(t) = e^{Qt} derived by matrix exponentiation"
        },
        {
          "field_a_term": "Felsenstein pruning algorithm (likelihood on tree)",
          "field_b_term": "forward algorithm on a tree-structured hidden Markov model",
          "note": "identical dynamic programming structure; different combinatorial space"
        },
        {
          "field_a_term": "branch length t (substitutions per site)",
          "field_b_term": "evolutionary time × rate (molecular clock: t = r × T)",
          "note": "calibrated to absolute time using fossil age constraints on internal nodes"
        },
        {
          "field_a_term": "MCMC posterior sampling over tree topologies and branch lengths",
          "field_b_term": "Bayesian inference in a high-dimensional discrete+continuous space",
          "note": "requires specialised topology proposal moves not needed for continuous parameters"
        },
        {
          "field_a_term": "relaxed molecular clock (log-normal rate variation across branches)",
          "field_b_term": "hierarchical Bayesian model with branch-specific rate hyperparameters",
          "note": "allows rate variation while preserving identifiability via shrinkage prior"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF01734359",
          "note": "Felsenstein (1981) J Mol Evol 17:368 — pruning algorithm for phylogenetic likelihood"
        },
        {
          "note": "Jukes & Cantor (1969) in Munson & Kern (eds) Mammalian Protein Metabolism, pp 21-132"
        },
        {
          "doi": "10.1186/1471-2148-7-214",
          "note": "Drummond & Rambaut (2007) BMC Evol Biol 7:214 — BEAST software for Bayesian phylogenetics"
        },
        {
          "note": "Zuckerkandl & Pauling (1965) J Theor Biol 8:357 — molecular clock hypothesis"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-mathematics/b-phylogenetics-maximum-likelihood.yaml"
    },
    {
      "id": "b-population-genetics-diffusion",
      "title": "The Wright-Fisher model of allele frequency evolution under drift and selection maps exactly onto a Fokker-Planck diffusion equation — Kimura's fixation probability formula and the stationary beta distribution are exact solutions, unifying probability theory and evolutionary genetics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Wright-Fisher model: a population of N diploid individuals; each generation, 2N gene copies sampled from previous generation (binomial sampling = genetic drift). For large N, the allele frequency p(t) follows the Fokker-Planck (diffusion) equation: ∂f/∂t = −∂/∂p[M(p)f] + ½∂²/∂p²[V(p)f], where dr",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-nearly-neutral-theory-diffusion-boundary"
      ],
      "communication_gap": "Kimura's diffusion approximation was developed by a geneticist (with formal physics training) writing primarily for mathematical geneticists. Population geneticists routinely apply the diffusion framework but do not always connect it to the broader Fokker-Planck literature in physics and stochastic processes. Conversely, physicists studying stochastic dynamics rarely know that Kimura's fixation formula is an exact solution to their standard equation. The connection to Dirichlet processes (Bayesian nonparametrics) via the Ewens sampling formula is known in mathematical statistics but not well-known in evolutionary biology.\n",
      "translation_table": [
        {
          "field_a_term": "Fokker-Planck diffusion equation ∂f/∂t = −∂[Mf]/∂p + ½∂²[Vf]/∂p²",
          "field_b_term": "Wright-Fisher diffusion for allele frequency evolution"
        },
        {
          "field_a_term": "drift coefficient M(p) = sp(1−p)",
          "field_b_term": "natural selection pressure on allele frequency"
        },
        {
          "field_a_term": "diffusion coefficient V(p) = p(1−p)/N",
          "field_b_term": "genetic drift (sampling noise inversely proportional to population size)"
        },
        {
          "field_a_term": "absorbing boundaries at p=0 and p=1",
          "field_b_term": "allele loss (p=0) or fixation (p=1) — irreversible evolutionary events"
        },
        {
          "field_a_term": "Kimura fixation probability formula",
          "field_b_term": "probability that a new mutation eventually spreads to fixation"
        },
        {
          "field_a_term": "beta distribution stationary state",
          "field_b_term": "allele frequency spectrum at mutation-drift-selection equilibrium"
        },
        {
          "field_a_term": "effective population size N_e (harmonic mean over bottlenecks)",
          "field_b_term": "effective diffusion constant — sets balance between drift and selection"
        }
      ],
      "references": [
        {
          "note": "Wright (1931) Evolution in Mendelian populations. Genetics 16:97"
        },
        {
          "doi": "10.2307/3211856",
          "note": "Kimura (1964) Diffusion models in population genetics. J Appl Probab 1:177"
        },
        {
          "note": "Ewens (2004) Mathematical Population Genetics, 2nd ed. Springer"
        },
        {
          "note": "Crow & Kimura (1970) An Introduction to Population Genetics Theory. Harper & Row"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-mathematics/b-population-genetics-diffusion.yaml"
    },
    {
      "id": "b-protein-crystallography-space-groups",
      "title": "Protein crystal packing is governed by the 65 chiral (Sohncke) space groups of classical crystallography: group-theoretic symmetry constraints determine allowable unit-cell geometries, reduce the phase problem to a finite search, and predict systematic absences in diffraction patterns with mathematical precision.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A crystal is a periodic repetition of a unit cell under the action of a space group G ≤ O(3) ⋊ ℝ³. For chiral molecules like proteins (L-amino acids), only the 65 Sohncke groups (those lacking improper symmetry operations) are allowed. The group action specifies: (1) how many independent copies of t",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-space-group-frequency-evolution-bias"
      ],
      "communication_gap": "Mathematical crystallographers (publishing in Acta Crystallographica A) and structural biologists (publishing in Nature Structural Biology, Structure) share the space-group formalism but rarely cross-publish theoretical analyses of why certain space groups are evolutionarily preferred for proteins.\n",
      "translation_table": [
        {
          "field_a_term": "Space group of the crystal lattice",
          "field_b_term": "Discrete group G ≤ O(3) ⋊ ℝ³ acting on electron density",
          "note": "65 chiral space groups for proteins; generators specify allowed symmetry operations"
        },
        {
          "field_a_term": "Asymmetric unit (AU)",
          "field_b_term": "Fundamental domain of the group action",
          "note": "AU contains exactly one independent copy of the molecule; full crystal = G × AU"
        },
        {
          "field_a_term": "Systematic absences in diffraction pattern",
          "field_b_term": "Group-theoretic extinction conditions on structure factors F(hkl)",
          "note": "Screw axes and glide planes force F(hkl)=0 for specific (h,k,l) combinations"
        },
        {
          "field_a_term": "Patterson function P(u)",
          "field_b_term": "Autocorrelation of the group-invariant electron density",
          "note": "Peaks at inter-atomic vectors; symmetry of P(u) reflects point group, enabling molecular replacement"
        }
      ],
      "references": [
        {
          "doi": "10.1107/S2059798316007191",
          "note": "Winn et al. – CCP4 suite; space-group determination in protein crystallography"
        },
        {
          "doi": "10.1107/S0907444909042073",
          "note": "McCoy et al. (2007) – PHASER: likelihood-based molecular replacement using group theory"
        },
        {
          "doi": "10.1107/S2052252517000422",
          "note": "Dauter & Jaskolski (2010) – How to read (and understand) Volume A of IT for Crystallographers"
        },
        {
          "doi": "10.1016/S0022-2836(05)80360-2",
          "note": "Rossmann & Arnold – International Tables for X-ray Crystallography: protein space groups"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-mathematics/b-protein-crystallography-space-groups.yaml"
    },
    {
      "id": "b-protein-folding-funnel-x-polyak-lojasiewicz-optimization-region",
      "title": "Funneled folding landscapes imply gradient-like descent toward the native basin along collective coordinates — modern optimization theory formalizes “geometry-dominated” nonconvex minimization via Polyak–Łojasiewicz (PL) inequalities near sharp minima (biophysics ↔ continuous optimization).\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Energy landscape theory pictures folding as movement on a rough free energy surface G(Q) that becomes funnel-shaped toward the native ensemble. In optimization, PL regions satisfy ‖∇f‖² ≥ μ(f−f*) — guaranteeing linear convergence of gradient methods despite nonconvexity. Mapping PL constants to phys",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-two-state-folders-admit-pl-like-surrogate-on-contact-order-parameter"
      ],
      "communication_gap": "Protein folders cite “funnels” qualitatively; optimization theorists prove PL guarantees for neural nets—cross-field citations remain thin, and equating PL constants to physical barrier heights would be **speculative** without explicit coarse-graining.\n",
      "translation_table": [
        {
          "field_a_term": "folding funnel steepness near native basin",
          "field_b_term": "Polyak–Łojasiewicz-type gradient dominance parameter μ (when an appropriate smooth surrogate exists)",
          "note": "PL is defined for smooth objectives on Euclidean spaces; free energy surfaces require coarse-graining caveats."
        },
        {
          "field_a_term": "kinetic traps / misfolding",
          "field_b_term": "regions where gradient descent (on surrogates) stalls — violation of PL-like inequalities",
          "note": "Analogical; trap kinetics include entropy barriers not captured by deterministic gradient scaling alone."
        },
        {
          "field_a_term": "reaction coordinate Q (fraction native contacts)",
          "field_b_term": "chart on which a landscape surrogate f(Q) might be PL in toy models",
          "note": "One-dimensional projections lose frustration degrees of freedom."
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.95.10.5921",
          "note": "Onuchic et al. (1998) — folding funnels and reaction pathways (PNAS)."
        },
        {
          "doi": "10.1007/978-3-319-46128-1_50",
          "note": "Karimi, Nutini & Schmidt (2016) — linear convergence under Polyak–Łojasiewicz (ECML PKDD proceedings)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-mathematics/b-protein-folding-funnel-x-polyak-lojasiewicz-optimization-region.yaml"
    },
    {
      "id": "b-replicator-equations-evolutionary-dynamics",
      "title": "The replicator equation ẋᵢ = xᵢ(fᵢ - f̄) governs strategy frequencies in evolutionary game theory, population genetics, and reinforcement learning — its trajectories on the probability simplex converge to Nash equilibria (evolutionary stable strategies), and the Price equation provides a unified mathematical framework for all levels of selection simultaneously.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The replicator equation, derived independently in evolutionary biology, game theory, and learning theory, is:\n\n  ẋᵢ = xᵢ (fᵢ(x) - f̄(x))\n\nwhere xᵢ is the frequency of strategy i, fᵢ(x) = Σⱼ aᵢⱼ xⱼ is the payoff to strategy i (with payoff matrix A), and f̄ = Σᵢ xᵢ fᵢ is the mean payoff. The key prope",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-replicator-rl-convergence"
      ],
      "communication_gap": "Taylor & Jonker (1978) published the replicator dynamics in Mathematical Biosciences; the connection to Nash equilibria was noted by game theorists later. Population geneticists and evolutionary game theorists develop parallel mathematical frameworks (Wright-Fisher vs. replicator dynamics) without realising they are the same equations. The connection to reinforcement learning (Sutton & Barto, Williams 1992) was made in the ML community without reference to evolutionary biology. Hofbauer & Sigmund (1998) is the definitive reference synthesising the connections, but it is not widely read outside mathematical biology. The Price equation is taught in evolutionary biology but rarely connected to RL theory in machine learning courses.\n",
      "translation_table": [
        {
          "field_a_term": "Strategy i with frequency xᵢ",
          "field_b_term": "Allele i with frequency xᵢ (genetics) / action i with probability π(a) (RL)",
          "note": "The probability simplex Δⁿ is simultaneously: allele frequency space, mixed strategy simplex, policy space"
        },
        {
          "field_a_term": "Payoff matrix A (aᵢⱼ = payoff to i from j)",
          "field_b_term": "Fitness matrix (genetics) / reward matrix (game theory) / Q-values (RL)",
          "note": "The replicator equation is linear in frequencies when A is a fixed payoff matrix"
        },
        {
          "field_a_term": "Nash equilibrium x*",
          "field_b_term": "Evolutionary stable strategy (ESS) / fixed point of policy gradient / Hardy-Weinberg equilibrium",
          "note": "ESS ⊂ Nash ⊂ rest points of replicator; ESS is the evolutionarily robust Nash equilibrium"
        },
        {
          "field_a_term": "Mean fitness f̄ = Σᵢ xᵢ fᵢ",
          "field_b_term": "Average value / reward / population mean fitness",
          "note": "Fisher's Fundamental Theorem: df̄/dt = Var(f) ≥ 0; mean fitness is a Lyapunov function for weak selection"
        },
        {
          "field_a_term": "Probability simplex Δⁿ",
          "field_b_term": "State space of allele/strategy frequencies (closed under replicator dynamics)",
          "note": "The simplex is a forward-invariant manifold; frequencies stay non-negative and sum to 1"
        },
        {
          "field_a_term": "Heteroclinic cycle",
          "field_b_term": "Non-equilibrium cyclic dominance (rock-paper-scissors dynamics)",
          "note": "Non-transitive competition (lizard morphs, bacterial strains, immune evasion) generates cycles, not equilibria"
        },
        {
          "field_a_term": "Price equation w̄ Δz̄ = Cov(w,z) + E(wΔz)",
          "field_b_term": "Universal partition of evolutionary change into selection + transmission components",
          "note": "Contains: Fisher's theorem (set Δz=0), Hamilton's rule for kin selection, multilevel selection; applicable to culture"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0025-5564(78)90077-9",
          "note": "Taylor & Jonker (1978) Math Biosci 40:145 — evolutionary stable strategies and game dynamics; replicator equation"
        },
        {
          "doi": "10.1038/246015a0",
          "note": "Maynard Smith & Price (1973) Nature 246:15 — the logic of animal conflict; evolutionarily stable strategies"
        },
        {
          "doi": "10.1038/227520a0",
          "note": "Price (1970) Nature 227:520 — selection and covariance; the Price equation"
        },
        {
          "note": "Hofbauer & Sigmund (1998) Evolutionary Games and Population Dynamics (Cambridge UP)",
          "url": "https://www.cambridge.org/core/books/evolutionary-games-and-population-dynamics/4B3E92E10A2EBF13CDB08F55C68F8A05"
        },
        {
          "doi": "10.1073/pnas.97.17.9430",
          "note": "Sutton et al. (2000) NIPS — policy gradient methods for RL; equivalence with replicator dynamics"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/biology-mathematics/b-replicator-equations-evolutionary-dynamics.yaml"
    },
    {
      "id": "b-cellular-senescence-tumor-suppression",
      "title": "Cellular senescence is a tumor-suppressive mechanism that permanently arrests cell proliferation in response to oncogenic stress, but the senescence-associated secretory phenotype (SASP) paradoxically promotes inflammation and cancer in aged tissues",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Oncogene-induced senescence (OIS) causes permanent cell cycle arrest via p21/p16-Rb pathway activation, suppressing tumor progression by removing pre-cancerous cells from the proliferating pool; however, the pro-inflammatory SASP (IL-6, IL-8, MMPs secreted by senescent cells) creates a tissue microe",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Cell biologists study senescence molecular mechanisms while oncologists and geriatricians observe its systemic effects; the dual tumor-suppressive and tumor-promoting roles of senescence are underappreciated clinically, creating a therapeutic challenge for senolytics in cancer patients.",
      "translation_table": [
        {
          "field_a_term": "replicative senescence (telomere shortening arrest)",
          "field_b_term": "p53/p21 pathway activation at critically short telomere",
          "note": "Telomere erosion triggers ATM/ATR kinase response, activating p53 -> p21 -> Rb pathway to halt S phase"
        },
        {
          "field_a_term": "oncogene-induced senescence (OIS)",
          "field_b_term": "p16INK4a/Rb pathway activation by Ras, BRAF oncoproteins",
          "note": "Oncogenic Ras activates ARF -> MDM2 inhibition -> p53 and independently p16 -> Rb; both pathways required for stable OIS"
        },
        {
          "field_a_term": "SASP cytokines (IL-6, IL-8, GRO-alpha)",
          "field_b_term": "NF-kB and mTOR activation in senescent cells",
          "note": "SASP promotes paracrine senescence and immune surveillance; chronic SASP without clearance is pro-tumorigenic"
        },
        {
          "field_a_term": "immune clearance of senescent cells (immunosurveillance)",
          "field_b_term": "NK cell and macrophage recognition of SASP NKG2D ligands and eat-me signals",
          "note": "Efficient clearance is tumor-suppressive; impaired clearance leads to SASP accumulation and cancer promotion"
        }
      ],
      "references": [
        {
          "doi": "10.1016/j.cell.2006.02.015",
          "note": "Campisi & d'Adda di Fagagna (2007) Nat Rev Mol Cell Biol - cellular senescence: when bad things happen to good cells"
        },
        {
          "doi": "10.1038/nature04268",
          "note": "Braig et al. (2005) Nature - oncogene-induced senescence as initial barrier to malignant transformation"
        },
        {
          "doi": "10.1016/j.cell.2019.06.001",
          "note": "Gorgoulis et al. (2019) Cell - cellular senescence: defining a path forward"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-medicine/b-cellular-senescence-tumor-suppression.yaml"
    },
    {
      "id": "b-glymphatic-aging",
      "title": "The glymphatic system — studied separately in sleep medicine, neurology, and geroscience — is a single cross-cutting mechanism linking sleep quality, amyloid clearance, and brain aging rate.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The glymphatic system (peri-arterial CSF influx driving interstitial waste efflux along paravascular spaces) is studied in three largely separate literatures: sleep medicine (it is most active during slow-wave sleep), neurology (its dysfunction correlates with amyloid-beta accumulation in Alzheimer'",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-glymphatic-amyloid-clearance-rate"
      ],
      "communication_gap": "Sleep medicine clinicians work in polysomnography labs; neurologists focus on amyloid PET and CSF biomarkers; geroscientists study hallmarks of aging in model organisms. Each community has its own conferences (SLEEP, Alzheimer's Association, Geroscience Summits) and rarely shares data formats, let alone quantitative models. Fluid-dynamics researchers are almost entirely absent from all three conference circuits.\n",
      "translation_table": [
        {
          "field_a_term": "slow-wave sleep duration (sleep medicine)",
          "field_b_term": "glymphatic pulse amplitude (neurology)",
          "note": "SWS drives the arterial pulsatility that pumps glymphatic flow"
        },
        {
          "field_a_term": "AQP4 polarisation (neurology)",
          "field_b_term": "hydraulic conductivity of endfeet (fluid dynamics)",
          "note": "AQP4 density on endfeet sets the water permeability driving clearance"
        },
        {
          "field_a_term": "amyloid-beta clearance rate (neurology)",
          "field_b_term": "solute efflux velocity in perivascular space (fluid dynamics)",
          "note": "Measurable output of glymphatic transport efficiency"
        },
        {
          "field_a_term": "biological age of brain (geroscience)",
          "field_b_term": "cumulative amyloid burden index (neurology)",
          "note": "Aging accelerates amyloid deposition partly through glymphatic decline"
        }
      ],
      "references": [
        {
          "arxiv": "2405.05812",
          "note": "Harvested paper seeding u-amyloid-progression-trajectory"
        },
        {
          "doi": "10.1126/science.1241224",
          "note": "Xie et al. 2013 — sleep drives metabolite clearance in the brain"
        },
        {
          "doi": "10.1073/pnas.1319576111",
          "note": "Iliff et al. 2014 — glymphatic transport and amyloid clearance"
        }
      ],
      "last_reviewed": "2026-05-04",
      "file": "cross-domain/biology-medicine/b-glymphatic-aging.yaml"
    },
    {
      "id": "b-protein-interaction-robustness",
      "title": "The human protein-protein interaction network is scale-free, making it robust to random protein loss but fragile to targeted hub removal — the same robustness-fragility tradeoff that governs all scale-free networks.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The human protein-protein interaction (PPI) network has degree distribution P(k) ∝ k^(−γ) with γ ≈ 2.4, the signature of a scale-free network grown by preferential attachment. Essential proteins (those whose knockout is lethal) are enriched at high-degree hubs. Network percolation theory predicts th",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ppi-hub-targeting-cancer"
      ],
      "communication_gap": "Network science and molecular biology publish in different journals. The scale-free network framework was applied to PPIs by Barabási's group but has not fully integrated into clinical pharmacology, where drug target selection is still dominated by pathway analysis rather than network topology.\n",
      "translation_table": [
        {
          "field_a_term": "protein interaction hub (high-degree node)",
          "field_b_term": "internet router or power grid substation (high-degree node)",
          "note": "Structural equivalence — both follow scale-free degree distribution"
        },
        {
          "field_a_term": "essential protein (lethal knockout)",
          "field_b_term": "critical infrastructure node",
          "note": "Biological essentiality maps onto network critical node; both identified by betweenness centrality"
        },
        {
          "field_a_term": "percolation threshold p_c",
          "field_b_term": "mutation load threshold for viable organism",
          "note": "Below p_c fraction of removed proteins, giant connected component collapses"
        },
        {
          "field_a_term": "drug target hub protein",
          "field_b_term": "congestion control bottleneck",
          "note": "Both exploit hub fragility for disruption, with corresponding off-target effects"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.286.5439.509",
          "note": "Barabási, A-L. & Albert, R. (1999). Emergence of scaling in random networks. Science 286:509."
        },
        {
          "doi": "10.1038/35075138",
          "note": "Jeong, H. et al. (2001). Lethality and centrality in protein networks. Nature 411:41."
        },
        {
          "doi": "10.1038/35019019",
          "note": "Albert, R. et al. (2000). Error and attack tolerance of complex networks. Nature 406:378."
        },
        {
          "doi": "10.1073/pnas.0701361104",
          "note": "Goh, K-I. et al. (2007). The human disease network. PNAS 104:8685."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-network-science/b-protein-interaction-robustness.yaml"
    },
    {
      "id": "b-random-boolean-networks-cell-fate",
      "title": "Kauffman's NK random Boolean network model predicts the number of stable cell types as sqrt(N) attractors in a genome-scale regulatory network of N genes with K inputs per gene; attractor states in the dynamical network correspond one-to-one with stable cell fates, providing a physics-of-complexity explanation for the Hayflick limit on differentiation state number\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "In Kauffman's NK random Boolean network model (N genes, K=2 inputs per gene), the number of dynamical attractors scales as sqrt(N) ≈ 2^(N/2) for large sparse networks, which correctly predicts that a human genome of ~25,000 genes supports ~150 cell types (sqrt(25000) ≈ 158); each attractor of the Bo",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Kauffman's theoretical biology work (1969–1993) is widely cited in complexity science but rarely taught in cell biology curricula; cell biologists learning Waddington's epigenetic landscape are not typically exposed to the formal Boolean network mathematics that gives Waddington's landscape quantitative predictive power. The attractor-cell-fate correspondence is treated as metaphor in most cell biology textbooks rather than as a quantitative prediction.\n",
      "translation_table": [
        {
          "field_a_term": "attractor state of Boolean regulatory network (theoretical biology)",
          "field_b_term": "stable cell fate or differentiation state (cell biology)",
          "note": "Both are minimal invariant sets that the system returns to after small perturbations; attractors map to cell types experimentally verified by Huang et al. (2005)"
        },
        {
          "field_a_term": "basin of attraction (theoretical biology)",
          "field_b_term": "set of initial gene expression states that converge to a cell type (cell biology)",
          "note": "Transcription factor perturbations that fail to change cell fate reside in the same basin; reprogramming requires crossing a basin boundary"
        },
        {
          "field_a_term": "attractor cycle length (theoretical biology)",
          "field_b_term": "cell-cycle period or gene expression oscillation period (cell biology)",
          "note": "Attractors with cycle length 1 (fixed points) map to terminally differentiated cells; longer cycles correspond to cycling progenitor states"
        },
        {
          "field_a_term": "K-connectivity controlling criticality (theoretical biology)",
          "field_b_term": "average transcription factor in-degree in gene regulatory network (cell biology)",
          "note": "K=2 places network at the edge of chaos (critical); real GRNs have K≈2 in-degree, consistent with critical dynamics maintaining evolvability"
        },
        {
          "field_a_term": "noise-induced transitions between attractors (theoretical biology)",
          "field_b_term": "spontaneous transdifferentiation or reprogramming events (cell biology)",
          "note": "Stochastic gene expression drives rare transitions between attractor basins; Yamanaka reprogramming is a noise-assisted basin escape"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.1005725107",
          "note": "Huang et al. (2010) — Bifurcation dynamics in lineage-commitment in bipotent progenitor cells; empirical validation of attractor model for cell fate"
        },
        {
          "doi": "10.1007/BF01742986",
          "note": "Kauffman (1969) — Metabolic stability and epigenesis in randomly constructed genetic nets; original NK model and cell-type attractor prediction"
        },
        {
          "doi": "10.1073/pnas.0603071103",
          "note": "Huang et al. (2005) — Cell fates as high-dimensional attractor states of a complex gene regulatory network; direct experimental mapping"
        },
        {
          "doi": "10.1093/bioinformatics/bth088",
          "note": "Albert & Othmer (2003) — The topology of the regulatory interactions predicts the expression pattern of the Drosophila segment polarity genes; Boolean network applied to development"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-network-science/b-random-boolean-networks-cell-fate.yaml"
    },
    {
      "id": "b-circadian-rhythms-neural-oscillators",
      "title": "Circadian clocks are cell-autonomous delayed negative-feedback oscillators (Goodwin topology) whose ~20,000 SCN neurons synchronize via VIP-mediated coupling — a biological implementation of the Kuramoto coupled-oscillator model, where jet-lag recovery rate is determined by the second eigenvalue of the coupling matrix.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Circadian clocks operate via transcription-translation feedback loops (TTFL): CLOCK/BMAL1 heterodimers activate PER/CRY gene transcription; PER/CRY proteins inhibit CLOCK/BMAL1 after a nuclear translocation delay — a delayed negative feedback oscillator. The Goodwin oscillator equations ẋ₁ = v₁/(1+(",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-kuramoto-scn-resynchronization-rate"
      ],
      "communication_gap": "Molecular chronobiologists publishing in Cell and Nature focus on gene-knockout phenotypes; mathematical biologists publishing Kuramoto analyses focus on network topology; clinical chronobiologists focus on shift-work health effects. These communities rarely cite each other, so quantitative predictions from network theory about resynchronization have not been translated into drug targets.\n",
      "translation_table": [
        {
          "field_a_term": "CLOCK/BMAL1 activator and PER/CRY repressor proteins",
          "field_b_term": "amplitude and phase variables of a nonlinear oscillator",
          "note": "protein concentrations encode phase; degradation rates set period"
        },
        {
          "field_a_term": "nuclear translocation delay of PER/CRY",
          "field_b_term": "time delay τ in a delayed differential equation oscillator",
          "note": "delay is essential for sustained oscillation; τ ≈ 6–8 h in mammals"
        },
        {
          "field_a_term": "VIP neuropeptide coupling between SCN neurons",
          "field_b_term": "sinusoidal coupling term in the Kuramoto model",
          "note": "VIP binds VPAC2 receptors and resets phase via cAMP-PKA signaling"
        },
        {
          "field_a_term": "SCN-wide entrainment to light-dark cycle",
          "field_b_term": "external forcing of a Kuramoto network above K_c",
          "note": "light acts on retinohypothalamic tract to phase-shift the SCN oscillator"
        },
        {
          "field_a_term": "jet-lag recovery timescale",
          "field_b_term": "1/Im(λ₂) — algebraic connectivity of the coupling network",
          "note": "faster network connectivity → faster resynchronization"
        }
      ],
      "references": [
        {
          "doi": "10.1038/343536a0",
          "note": "Hardin et al. (1990) Feedback of the Drosophila period gene product on circadian cycling of its messenger RNA levels. Nature 343:536–540"
        },
        {
          "doi": "10.1073/pnas.0308709101",
          "note": "Leloup & Goldbeter (2004) Toward a detailed computational model for the mammalian circadian clock. PNAS 101:17228–17233"
        },
        {
          "doi": "10.1038/nrn2914",
          "note": "Welsh et al. (2010) Suprachiasmatic nucleus: cell autonomy and network properties. Nat Rev Neurosci 11:764–773"
        },
        {
          "doi": "10.1529/biophysj.104.058388",
          "note": "Gonze et al. (2005) Spontaneous synchronization of coupled circadian oscillators. Biophys J 89:120–129"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-neuroscience/b-circadian-rhythms-neural-oscillators.yaml"
    },
    {
      "id": "b-sleep-memory-consolidation",
      "title": "Sleep hippocampal sharp-wave ripples and the synaptic homeostasis hypothesis bridge molecular sleep biology to systems neuroscience of memory — glymphatic clearance links sleep to neurodegeneration prevention",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Sleep serves two intertwined functions that bridge molecular biology to systems neuroscience: (1) Memory consolidation — slow-wave sleep (SWS) sharp-wave ripples (SPW-Rs, 80-120 Hz high-frequency bursts in CA1) replay waking experience via hippocampal-neocortical dialogue, transferring episodic memo",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-targeted-memory-reactivation-during-sleep-enhances-consolidation"
      ],
      "communication_gap": "Molecular sleep biologists (studying circadian clocks, sleep genes, glymphatic channels) publish in Cell, Nature, and Current Biology; systems neuroscientists studying memory consolidation publish in Neuron, Nature Neuroscience, and Journal of Neuroscience. Clinical sleep medicine (polysomnography, sleep disorders) is largely separate from both. The three communities rarely co-design experiments or share methodologies, slowing translation of mechanistic sleep biology into clinical interventions for memory disorders and neurodegeneration.\n",
      "translation_table": [
        {
          "field_a_term": "sharp-wave ripple (SPW-R, electrophysiology)",
          "field_b_term": "memory reactivation event (cognitive neuroscience)",
          "note": "Each SPW-R burst in CA1 corresponds to a compressed replay of a waking experience sequence"
        },
        {
          "field_a_term": "hippocampal-neocortical transfer (systems neuroscience)",
          "field_b_term": "complementary learning systems theory (computational neuroscience)",
          "note": "Two-system memory architecture — fast hippocampal binding vs. slow neocortical consolidation"
        },
        {
          "field_a_term": "LTP / LTD (molecular biology, synaptic plasticity)",
          "field_b_term": "synaptic homeostasis / downscaling (sleep neuroscience)",
          "note": "Wake-driven LTP accumulates; sleep-driven scaling restores baseline synaptic weights"
        },
        {
          "field_a_term": "glymphatic system (AQP4 astrocyte water channels)",
          "field_b_term": "lymphatic drainage of CNS (physiology)",
          "note": "The brain's unique waste clearance system — the glymphatic pathway — is maximally active during NREM"
        },
        {
          "field_a_term": "amyloid-β clearance rate (molecular biology)",
          "field_b_term": "Alzheimer's disease risk (clinical neuroscience)",
          "note": "Sleep deprivation acutely increases amyloid-β CSF levels — chronic deprivation is an AD risk factor"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.7624455",
          "note": "Wilson & McNaughton (1994). Reactivation of hippocampal ensemble memories during sleep. Science 265:676."
        },
        {
          "note": "Tononi & Cirelli (2006). Sleep function and synaptic homeostasis. Sleep 29:145."
        },
        {
          "doi": "10.1038/nn.3937",
          "note": "Buzsáki (2015). Hippocampal sharp wave-ripple: A cognitive biomarker for episodic memory and planning. Nat Neurosci 18:1257."
        },
        {
          "doi": "10.1126/science.1241224",
          "note": "Xie et al. (2013). Sleep drives metabolite clearance from the adult brain. Science 342:373."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-neuroscience/b-sleep-memory-consolidation.yaml"
    },
    {
      "id": "b-action-potential-x-soliton",
      "title": "Action potential x Soliton — nerve impulse as nonlinear wave\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The Hodgkin-Huxley action potential propagates as a solitary wave (soliton) in the nonlinear cable equation; the nerve impulse velocity and shape stability arise from the same mathematical mechanism as solitons in the Korteweg-de Vries equation.\n",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Hodgkin-Huxley was developed as an electrophysiology model in 1952; soliton theory emerged in fluid mechanics. The mathematical equivalence was recognized but rarely integrated into neuroscience textbooks.\n",
      "translation_table": [
        {
          "field_a_term": "Hodgkin-Huxley membrane nonlinearity",
          "field_b_term": "KdV nonlinear dispersion term",
          "note": "The voltage-gated ion channel dynamics provide the nonlinearity that balances diffusive spreading, exactly as the nonlinear term in KdV balances dispersion.\n"
        },
        {
          "field_a_term": "Action potential propagation velocity",
          "field_b_term": "Soliton speed (function of amplitude)",
          "note": "Both velocities are determined by the amplitude-nonlinearity balance; the conduction velocity in myelinated axons scales analogously.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1113/jphysiol.1952.sp004764",
          "note": "Hodgkin & Huxley (1952) — quantitative description of membrane current and nerve impulse"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-physics/b-action-potential-x-soliton.yaml"
    },
    {
      "id": "b-active-matter-cytoskeletal",
      "title": "Active matter physics ↔ cytoskeletal dynamics — living contractile gels and biological pattern formation",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Active matter describes systems of self-propelled units that consume energy to generate mechanical forces and motion at the expense of internal free energy — far from thermodynamic equilibrium. The cell cytoskeleton is the canonical active matter system: actin filaments (passive polymer) + myosin II",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-topological-defects-as-mechanobiological-signals"
      ],
      "communication_gap": "Active matter physics emerged from the statistical physics community (Toner, Tu, Marchetti, Ramaswamy) with limited connection to cell biologists. The landmark Marchetti et al. (2013) Rev Mod Phys review presented the theory largely to physicists. Cell biologists studying cytoskeletal dynamics rarely read physical review journals. The Saw et al. (2017) Nature paper was transformative precisely because it bridged the communities by placing the abstract topological defect concept directly in a cell biology context with clear physiological implications.\n",
      "translation_table": [
        {
          "field_a_term": "active stress tensor σ_active = −ζQ",
          "field_b_term": "myosin-generated cortical tension in cells",
          "note": "ζ > 0 (contractile): cortex contracts toward order; ζ < 0 (extensile): stress fibres push out"
        },
        {
          "field_a_term": "activity coefficient ζ (energy input rate)",
          "field_b_term": "ATP hydrolysis rate / myosin motor density",
          "note": "Biochemical free energy input parameterised by a single physical coefficient"
        },
        {
          "field_a_term": "nematic order parameter Q (orientation tensor)",
          "field_b_term": "actin filament alignment / cell elongation direction",
          "note": "Q = 0 (isotropic disordered) → Q = 1 (fully aligned); measured by fluorescence polarisation"
        },
        {
          "field_a_term": "+½ topological defect in active nematic",
          "field_b_term": "cellular extrusion site (cell elimination from epithelium)",
          "note": "Saw et al. 2017: +½ defects generate compressive stress that squeezes cells out of monolayer"
        },
        {
          "field_a_term": "Toner-Tu equations (polar active fluid hydrodynamics)",
          "field_b_term": "collective cell migration / flocking of motile cells",
          "note": "The same PDE describes bird flocks, bacterial swarms, and migrating epithelial sheets"
        },
        {
          "field_a_term": "spontaneous flow instability (ζ > ζ_c)",
          "field_b_term": "spontaneous actin cortex oscillations in confined cells",
          "note": "Above a critical activity, the uniform state becomes unstable — predicts spontaneous cortex dynamics"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.75.4326",
          "note": "Toner & Tu (1995) Phys Rev Lett 75:4326 — hydrodynamics of active polar fluids"
        },
        {
          "doi": "10.1103/RevModPhys.85.1143",
          "note": "Marchetti et al. (2013) Rev Mod Phys 85:1143 — comprehensive active matter review"
        },
        {
          "doi": "10.1146/annurev-conmatphys-070909-104101",
          "note": "Ramaswamy (2010) Annu Rev Condens Matter Phys 1:323 — mechanics and statistics of active matter"
        },
        {
          "doi": "10.1038/nature21718",
          "note": "Saw et al. (2017) Nature 544:212 — topological defects trigger cell extrusion"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-physics/b-active-matter-cytoskeletal.yaml"
    },
    {
      "id": "b-biofilm-x-active-nematic",
      "title": "Bacterial biofilm ↔ Active nematics — collective orientation as liquid crystal order",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Dense bacterial communities in biofilms exhibit active nematic liquid crystal order; cell alignment, topological defect dynamics (+1/2 and -1/2 defects), and collective flows are quantitatively described by active nematic hydrodynamics, making biofilm architecture a biological realization of active ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-biofilm-x-active-nematic"
      ],
      "communication_gap": "Active nematic liquid crystal physics was developed in soft matter physics (Marchetti, Dogic, Bhattacharya) using reconstituted cytoskeletal systems. Biofilm biology was developed in microbiology, focused on quorum sensing and matrix composition. The connection between biofilm cell alignment and active nematic theory was made only recently (Doostmohammadi et al. 2016, Copenhagen et al. 2021) — two decades after active nematics were first described theoretically.",
      "translation_table": [
        {
          "field_a_term": "bacterial cell orientation field (director field in dense biofilm)",
          "field_b_term": "nematic director n̂(r) in liquid crystal theory",
          "note": "Rod-shaped bacteria (E. coli, B. subtilis) align locally; long-range nematic order develops above critical density"
        },
        {
          "field_a_term": "topological defects in biofilm (+1/2 comet, -1/2 trefoil patterns)",
          "field_b_term": "±1/2 disclination defects in active nematic liquid crystal",
          "note": "+1/2 defects move, -1/2 defects are stationary; +1/2 defect velocity predicts local biofilm stress"
        },
        {
          "field_a_term": "biofilm mechanical stress at +1/2 defect cores",
          "field_b_term": "extensile stress in active nematic at defect cores (σ_active)",
          "note": "+1/2 defects accumulate mechanical stress, triggering cell extrusion and colony expansion"
        },
        {
          "field_a_term": "biofilm collective flow patterns (vortices, jets, bulk flow)",
          "field_b_term": "active nematic spontaneous flow instability (above activity threshold)",
          "note": "Active nematics above activity threshold a* develop spontaneous flow — matches biofilm flow patterns"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nphys3600",
          "note": "Doostmohammadi et al. (2016) — biofilm active nematic order; Nature Physics — topological defects in bacteria"
        },
        {
          "doi": "10.1038/s41467-021-24792-2",
          "note": "Copenhagen et al. (2021) — topological defects promote layer formation in Myxococcus xanthus biofilms; Nature Comm"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-physics/b-biofilm-x-active-nematic.yaml"
    },
    {
      "id": "b-bioluminescence-quantum-yield",
      "title": "Bioluminescence converts chemical energy to photons via the luciferin-luciferase reaction with quantum yields up to 0.88, the highest of any biochemical process — the excited-state electronic structure of oxyluciferin determines emission wavelength, and luciferase active-site polarity tunes colour, bridging photochemistry, quantum optics, and molecular evolution of light production.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Bioluminescence is the biological implementation of chemiluminescence — conversion of chemical bond energy directly to photons without thermal intermediates (no blackbody radiation). The key physical quantity is the quantum yield Φ = photons emitted / molecules reacted, which is the product of the e",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-bioluminescence-coevolution-visual-system-deep-sea"
      ],
      "communication_gap": "Bioluminescence research is distributed across marine biology (deep-sea surveys), biochemistry (luciferin-luciferase mechanism), photochemistry (excited-state quantum chemistry), and biotechnology (reporter applications) — with each community rarely engaging the others. The quantum chemistry of the excited-state mechanism (whether enolate or keto tautomer of oxyluciferin emits, which affects the predicted emission wavelength) was actively debated for decades partly because chemists and biochemists used different experimental systems and terminology. Marine bioluminescence ecologists rarely read photochemistry journals. Biotechnology applications of luciferase (BLI imaging, BRET biosensors) are developed without systematic engagement with the quantum yield physics that determines assay sensitivity.\n",
      "translation_table": [
        {
          "field_a_term": "quantum yield Φ = photons / reactions",
          "field_b_term": "luminescence efficiency of reporter assay",
          "note": "high Φ is essential for sensitive gene expression and ATP quantification assays"
        },
        {
          "field_a_term": "electronically excited singlet state S₁ of oxyluciferin",
          "field_b_term": "photon-emitting intermediate in luciferin oxidation pathway",
          "note": "formed by two-electron oxidative decarboxylation; the chemical mechanism producing the S1 state is still debated"
        },
        {
          "field_a_term": "solvatochromism (emission wavelength shift with dielectric environment)",
          "field_b_term": "colour tuning by luciferase active-site polarity",
          "note": "same physics as solvatochromic dyes; explains emission colour diversity without luciferin chemical changes"
        },
        {
          "field_a_term": "seawater blue transmission window (λ_min absorption ~ 480 nm)",
          "field_b_term": "convergent evolution of blue bioluminescence in deep-sea organisms",
          "note": "physical constraint (optical transmission) drives evolutionary selection of emission wavelength"
        },
        {
          "field_a_term": "pH-gated luciferase inhibition/activation",
          "field_b_term": "mechanical-to-chemical signal transduction in dinoflagellates",
          "note": "acidification from mechanical deformation switches on bioluminescence — a pH-sensitive quantum yield switch"
        },
        {
          "field_a_term": "BRET (bioluminescence resonance energy transfer)",
          "field_b_term": "FRET analogue using bioluminescence donor instead of fluorescent donor",
          "note": "eliminates need for external excitation light source; enables in vivo protein interaction monitoring"
        }
      ],
      "references": [
        {
          "doi": "10.1002/jcp.1030300104",
          "note": "McElroy (1947) J Cell Comp Physiol 30:11 — first characterisation of firefly luciferin-luciferase reaction"
        },
        {
          "note": "Seliger & McElroy (1960) Arch Biochem Biophys 88:136 — quantum yield measurement (Phi ~ 0.88)"
        },
        {
          "doi": "10.1007/s00018-002-8437-7",
          "note": "Viviani (2002) Cell Mol Life Sci 59:2029 — bioluminescence biochemistry review"
        },
        {
          "doi": "10.1146/annurev-marine-120308-081028",
          "note": "Haddock et al. (2010) Annu Rev Mar Sci 2:443 — bioluminescence in the ocean"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-physics/b-bioluminescence-quantum-yield.yaml"
    },
    {
      "id": "b-biophotonics-fluorescence-microscopy",
      "title": "Biophotonics and Fluorescence Microscopy — photophysics of excited states connects super-resolution imaging, FRET distance measurement, and genetically encoded reporters",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Fluorescence proceeds through a Jablonski cycle: photon absorption promotes a molecule from S0 to S1 (~1 fs), vibrational relaxation dissipates energy (ps), and fluorescent emission follows (ns). The Stokes shift — emitted photon lower energy than absorbed — allows spectral separation of excitation ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Optical physicists and photochemists who develop fluorescent probes and microscopy methods typically publish in optics journals; cell biologists who use the instruments read biology journals. Nobel Prize-level breakthroughs (STED 2014, GFP 2008) have started bridging these communities, but many biologists treat super-resolution microscopes as black boxes and few physicists appreciate the cell biology questions that motivate instrument development.\n",
      "translation_table": [
        {
          "field_a_term": "Stokes shift (emission redshifted vs. absorption)",
          "field_b_term": "vibrational energy dissipation before fluorescence",
          "note": "Enables spectral separation of excitation and emission; fundamental to signal-to-noise in microscopy"
        },
        {
          "field_a_term": "FRET efficiency E = 1/(1+(r/R₀)⁶)",
          "field_b_term": "distance between donor and acceptor fluorophores (2–10 nm)",
          "note": "Used to measure protein–protein distances, conformational changes, and binding events in live cells"
        },
        {
          "field_a_term": "STED depletion beam (stimulated emission depletion)",
          "field_b_term": "suppression of fluorescence outside the focal minimum",
          "note": "Doughnut-shaped STED beam forces molecules to ground state; only sub-diffraction-limited spot fluoresces"
        },
        {
          "field_a_term": "PALM/STORM single-molecule localisation",
          "field_b_term": "stochastic activation and precise centroid fitting of individual fluorophores",
          "note": "σ_loc = σ_PSF/√N; requires sparse activation so individual PSFs do not overlap"
        },
        {
          "field_a_term": "GFP chromophore (p-hydroxybenzylideneimidazolinone)",
          "field_b_term": "genetically encoded fluorescent tag for specific proteins in live cells",
          "note": "Beta-barrel scaffold autocatalytically forms chromophore; >200 variants cover the visible spectrum"
        },
        {
          "field_a_term": "Abbe diffraction limit d = λ/2NA",
          "field_b_term": "classical resolution limit of light microscopy (~200–250 nm)",
          "note": "Super-resolution methods circumvent this by exploiting fluorophore photophysics, not optics alone"
        }
      ],
      "references": [
        {
          "doi": "10.1364/OL.19.000780",
          "note": "Hell & Wichmann (1994) Opt Lett 19:780 — first STED proposal"
        },
        {
          "doi": "10.1126/science.1127344",
          "note": "Betzig et al. (2006) Science 313:1642 — PALM super-resolution"
        },
        {
          "doi": "10.1039/b809862j",
          "note": "Tsien (2008) Nobel Lecture — GFP development and fluorescent protein engineering"
        },
        {
          "note": "Förster (1948) Ann Phys 2:55 — original FRET theory and Förster radius"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-physics/b-biophotonics-fluorescence-microscopy.yaml"
    },
    {
      "id": "b-calcium-signaling-x-stochastic-resonance",
      "title": "Calcium Signaling x Stochastic Resonance — IP3 receptor as noise-enhanced detector\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Intracellular calcium oscillations generated by IP3 receptor clusters exhibit stochastic resonance: noisy calcium puffs (single cluster openings) coherently summate at an optimal noise level to produce global calcium waves that encode information more reliably than deterministic firing would; subcel",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Stochastic resonance was discovered by climate scientists (Benzi 1981) and developed by physicists for sensory systems; calcium biologists studied IP3R cluster dynamics experimentally without the SR mathematical framework; the connection (calcium puffs as SR noise source) was made by computational cell biologists in the 2000s and remains underutilized in calcium imaging analysis.\n",
      "translation_table": [
        {
          "field_a_term": "IP3 receptor cluster (stochastic puff generator)",
          "field_b_term": "Noisy threshold detector in SR framework",
          "note": "IP3R clusters open stochastically with probability depending on local Ca²⁺ and IP3 concentration; the stochastic opening is the noise source that drives stochastic resonance in the calcium signaling system.\n"
        },
        {
          "field_a_term": "Calcium puff (local cluster event)",
          "field_b_term": "Sub-threshold stochastic fluctuation",
          "note": "Calcium puffs are localized releases from individual IP3R clusters; at sub-threshold IP3 concentrations, puffs are isolated; stochastic resonance occurs when puff summation at optimal noise amplitude triggers global waves.\n"
        },
        {
          "field_a_term": "Global calcium wave (cell-wide propagation)",
          "field_b_term": "Coherent signal output at optimal noise level",
          "note": "Global waves propagate through CICR (calcium-induced calcium release); stochastic resonance predicts a signal-to-noise maximum at intermediate IP3R cluster density — neither too noisy (many puffs) nor too quiet (few puffs).\n"
        },
        {
          "field_a_term": "Calcium oscillation frequency encoding",
          "field_b_term": "Information encoding via stochastic resonance peak",
          "note": "Cells encode signal amplitude in oscillation frequency; SR predicts that optimal noise level maximizes the fidelity of frequency encoding across different IP3 input levels — explaining why cells maintain Ca²⁺ homeostasis at precise levels.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.050347197",
          "note": "Shuai & Jung (2003) — Stochastic properties of Ca²⁺ release of inositol 1,4,5-trisphosphate receptor clusters; PNAS 100:506"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-physics/b-calcium-signaling-x-stochastic-resonance.yaml"
    },
    {
      "id": "b-chromatin-loop-extrusion-polymer",
      "title": "Chromatin organisation by cohesin-mediated loop extrusion is quantitatively predicted by polymer-physics models: the Hi-C contact-probability scaling P(s) ~ s^{-0.75} within topologically associating domains (TADs) matches the Rouse/fractal-globule polymer exponent, while TAD boundaries correspond to equilibrium positions of CTCF-stalled extruding cohesin rings.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Cohesin translocates along chromatin, extruding DNA loops until blocked by convergently oriented CTCF binding sites. The resulting TAD structure is identical to a 1D-extruded polymer loop ensemble. Hi-C contact-probability maps within TADs follow P(s) ~ s^{-0.75}, the fractal-globule scaling predict",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ctcf-boundary-polymer-wall"
      ],
      "communication_gap": "Chromosome biology and polymer physics developed separate vocabularies and publication venues (Nature/Cell vs. Macromolecules/Soft Matter). The fractal- globule polymer model (Grosberg 1993) was published 23 years before Hi-C experiments confirmed its relevance (Lieberman-Aiden 2009). Cohesin biologists rarely cite polymer-physics derivations.\n",
      "translation_table": [
        {
          "field_a_term": "Cohesin ring translocating along chromatin",
          "field_b_term": "Monomer pair undergoing loop-extrusion dynamics",
          "note": "Cohesin acts as a molecular motor driving loop growth; maps to a Rouse-chain active process"
        },
        {
          "field_a_term": "CTCF boundary element (convergent orientation)",
          "field_b_term": "Fixed polymer boundary condition / reflecting wall",
          "note": "CTCF in convergent orientation halts extrusion, setting loop length distribution"
        },
        {
          "field_a_term": "Topologically associating domain (TAD)",
          "field_b_term": "Polymer loop in fractal-globule conformation",
          "note": "TAD size ~ mean loop length set by cohesin processivity and CTCF density"
        },
        {
          "field_a_term": "Hi-C contact probability P(s)",
          "field_b_term": "Polymer end-to-end contact probability for chain of s monomers",
          "note": "P(s) ~ s^{-0.75} is the fractal-globule prediction; equilibrium globule gives ~s^{-1.5}"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1181369",
          "note": "Lieberman-Aiden et al. (2009) Science – Hi-C reveals fractal-globule chromatin organisation"
        },
        {
          "doi": "10.1016/j.cell.2016.05.001",
          "note": "Fudenberg et al. (2016) Cell Reports – loop extrusion MD model quantitatively reproduces Hi-C TADs"
        },
        {
          "doi": "10.1073/pnas.1518552113",
          "note": "Sanborn et al. (2015) PNAS – cohesin extrusion predicts insulation at CTCF sites"
        },
        {
          "doi": "10.1126/science.aaf4831",
          "note": "Rao et al. (2017) Science – cohesin depletion collapses TADs, confirming extrusion mechanism"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-physics/b-chromatin-loop-extrusion-polymer.yaml"
    },
    {
      "id": "b-circadian-clocks-nonlinear-oscillators",
      "title": "Circadian clocks are ~24-hour biological limit cycle oscillators arising via Hopf bifurcation in transcription-translation delay feedback loops; entrainment by light follows Arnold tongue theory for periodically forced nonlinear oscillators, and temperature compensation (Q10~1) represents an unsolved problem in biological nonlinear dynamics, bridging molecular biology to dynamical systems theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Circadian clocks are ~24-hour biological oscillators driven by transcription-translation feedback loops. Core mechanism: protein X represses its own transcription with delay tau — a delay differential equation x_dot = f(x(t-tau)) - gamma*x. This produces limit cycle oscillations when tau × |df/dx| >",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-circadian-hopf-bifurcation-delay-oscillator"
      ],
      "communication_gap": "Molecular chronobiology (identifying clock genes, protein interactions) and nonlinear dynamics (Hopf bifurcation, Arnold tongue, delay equations) are conducted in different communities with different mathematical fluency. Goodwin (1965) made the connection originally, but molecular biologists rediscovered clock mechanisms without adopting the mathematical framework systematically until Goldbeter's 1995 model.\n",
      "translation_table": [
        {
          "field_a_term": "transcription-translation feedback loop",
          "field_b_term": "delay differential equation with negative feedback",
          "note": "PER/TIM (Drosophila) or CLOCK/BMAL1/CRY (mammals) are the molecular delay"
        },
        {
          "field_a_term": "onset of circadian oscillation (loss of arrhythmicity in mutants)",
          "field_b_term": "Hopf bifurcation (stable fixed point loses stability, limit cycle emerges)",
          "note": "arrhythmic mutants (per01, clk) correspond to fixed-point stable regime"
        },
        {
          "field_a_term": "entrainment by light (zeitgeber)",
          "field_b_term": "forced nonlinear oscillator response (Arnold tongue)",
          "note": "jet lag = transient period during which oscillator re-synchronizes to new forcing"
        },
        {
          "field_a_term": "temperature compensation (Q10~1)",
          "field_b_term": "parameter-insensitive limit cycle period (robustness to k_cat changes)",
          "note": "unresolved: molecular mechanism that makes tau*k_cat product temperature-insensitive"
        },
        {
          "field_a_term": "spontaneous activity rhythm (free-running period tau_0)",
          "field_b_term": "natural frequency omega_0 of the limit cycle oscillator",
          "note": "tau_0 measured in constant darkness; species-specific due to molecular differences"
        }
      ],
      "references": [
        {
          "note": "Goodwin (1965) Adv Enzyme Regul 3:425 — oscillations in biochemical control systems"
        },
        {
          "doi": "10.1073/pnas.92.20.9107",
          "note": "Goldbeter (1995) PNAS 92:9107 — minimal model for circadian oscillations"
        },
        {
          "doi": "10.1038/nature01078",
          "note": "Reppert & Weaver (2002) Nature 418:935 — molecular architecture of mammalian circadian clock"
        },
        {
          "note": "Roenneberg & Merrow (2016) Curr Biol 26:R432 — circadian clocks in biology"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-physics/b-circadian-clocks-nonlinear-oscillators.yaml"
    },
    {
      "id": "b-cochlear-mechanics-hearing-biophysics",
      "title": "The cochlea performs biological Fourier analysis via a graded-stiffness basilar membrane that decomposes sound into frequency components (von Békésy traveling wave), and active outer hair cell electromotility via prestin amplifies this mechanical signal 40-100× through a Hopf bifurcation mechanism that produces otoacoustic emissions and achieves sub-thermal noise sensitivity — violating naive equipartition theorem expectations.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The cochlea is the biological implementation of a traveling-wave frequency analyzer. It is 35 mm long and tonotopically organized: the base (near the oval window) responds to high frequencies (20 kHz) and the apex to low frequencies (20 Hz). This frequency mapping arises from a graded mechanical str",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-prestin-somatic-motility-primary-cochlear-amplification-mechanism-mammals"
      ],
      "communication_gap": "Auditory neuroscience (publishing in the Journal of Neuroscience, Hearing Research, JARO) focuses on neural coding and auditory cortex; cochlear biophysics is a specialty sub-field known mainly to bioengineers and biophysicists (publishing in Biophysical Journal, JASA). The nonlinear dynamics community (Hopf bifurcation, dynamical systems) rarely reads auditory journals. The prestin molecular biology is published in Nature and Cell but the connection to Hopf oscillator dynamics is rarely made in molecular biology papers. Clinical audiology uses OAEs diagnostically without awareness of the underlying Hopf bifurcation theory.\n",
      "translation_table": [
        {
          "field_a_term": "Basilar membrane stiffness gradient k(x) ∝ e^{-αx} (cochlear mechanics)",
          "field_b_term": "Spatial frequency filter bank for sound decomposition",
          "note": "Graded stiffness implements continuous Fourier analysis along the cochlear length"
        },
        {
          "field_a_term": "Hopf bifurcation parameter μ (nonlinear dynamics)",
          "field_b_term": "Gain of the OHC cochlear amplifier relative to critical point",
          "note": "μ = 0 at critical point; negative μ (damped) passive cochlea; positive μ (unstable)"
        },
        {
          "field_a_term": "Compressive nonlinearity: response ∝ stimulus^{1/3} (Hopf critical)",
          "field_b_term": "40 dB cochlear compression measured in BM velocity vs. SPL",
          "note": "1/3 power law is the universal signature of Hopf critical oscillation"
        },
        {
          "field_a_term": "Prestin voltage-sensitive motor protein (molecular biology)",
          "field_b_term": "Active force actuator providing cycle-by-cycle BM amplification",
          "note": "Only prestin-based somatic motility can operate at acoustic frequencies in mammals"
        },
        {
          "field_a_term": "Equipartition theorem: ⟨½kx²⟩ = ½k_BT at equilibrium",
          "field_b_term": "Thermal noise floor for passive BM — exceeded by active cochlea",
          "note": "Active cochlea achieves sensitivity below thermal noise by active noise reduction"
        },
        {
          "field_a_term": "Otoacoustic emission (OAE)",
          "field_b_term": "Diagnostic biomarker for OHC function — used in newborn hearing screening",
          "note": "OAEs are the physical proof that the cochlea is a nonlinear active oscillator"
        }
      ],
      "references": [
        {
          "note": "von Békésy (1960) Experiments in Hearing, McGraw-Hill — Nobel Prize work establishing the traveling wave theory of cochlear mechanics\n"
        },
        {
          "doi": "10.1016/j.neuron.2008.07.012",
          "note": "Hudspeth (2008) Making an effort to listen: mechanical amplification by myosin molecules and ion channels, Neuron 59:530 — Hopf bifurcation model of the cochlear amplifier\n"
        },
        {
          "doi": "10.1038/35013657",
          "note": "Zheng et al. (2000) Prestin is the motor protein of cochlear outer hair cells, Nature 405:149 — discovery of prestin as the OHC somatic motor\n"
        },
        {
          "doi": "10.1103/PhysRevLett.84.5232",
          "note": "Eguíluz et al. (2000) Essential nonlinearities in hearing, Phys Rev Lett 84:5232 — Hopf bifurcation model of the cochlear amplifier, predicts 1/3 power compression and OAEs\n"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-physics/b-cochlear-mechanics-hearing-biophysics.yaml"
    },
    {
      "id": "b-cytoskeleton-x-active-matter",
      "title": "Cytoskeleton x Active matter — motor protein filaments as polar active fluid\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The cytoskeletal network of actin filaments and myosin motors is a biological realization of active matter (polar self-propelled rods); cytoplasmic streaming, cell motility, and mitotic spindle assembly are emergent collective behaviors described by Toner-Tu active hydrodynamics.\n",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Active matter physics developed theoretically in the 1990s (Toner & Tu 1995); the connection to cytoskeletal dynamics in cell biology was established through in vitro reconstitution experiments in the 2010s.\n",
      "translation_table": [
        {
          "field_a_term": "Actin filament with myosin motor",
          "field_b_term": "Self-propelled rod in active fluid",
          "note": "The actin-myosin unit is the canonical biological active particle; its polarity and self-propulsion map directly onto Toner-Tu polar active matter.\n"
        },
        {
          "field_a_term": "Cytoplasmic streaming vortices",
          "field_b_term": "Spontaneous flow in active fluid",
          "note": "Both arise from the same hydrodynamic instability of the uniform polarized state; Toner-Tu theory predicts the streaming patterns in Drosophila oocytes.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.75.4326",
          "note": "Toner & Tu (1995) — long-range order in a 2D dynamical XY model: how birds fly together"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-physics/b-cytoskeleton-x-active-matter.yaml"
    },
    {
      "id": "b-developmental-turing-instability",
      "title": "Turing's (1952) reaction-diffusion instability — activator A (slow diffusion) and inhibitor I (fast diffusion, D_I >> D_A) spontaneously break spatial homogeneity at wavenumber k* = √(f_A/D_A) — experimentally confirmed in zebrafish skin pigmentation, digit spacing via Sox9/BMP feedback, and arid-hillside tiger-bush vegetation patterns.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Turing (1952) showed that a homogeneous steady state of a two-morphogen reaction- diffusion system can be stable to spatially uniform perturbations but unstable to spatially periodic perturbations — a diffusion-driven instability. Consider: ∂A/∂t = f(A,I) + D_A ∇²A and ∂I/∂t = g(A,I) + D_I ∇²I. Line",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-turing-digit-count-bmp-gradient-wavelength-scaling"
      ],
      "communication_gap": "Turing's 1952 paper was written by a mathematician in a biology context and largely ignored by developmental biologists for 40 years — Wolpert's positional information model (1969) dominated developmental biology. The Kondo-Asai paper (1995) finally demonstrated biological reality of Turing patterns in zebrafish, but in Nature (biological journal). Mathematical analysis of Turing systems is published in J Math Biol, SIAM J Appl Math, and J Theor Biol — journals rarely read by experimental developmental biologists. Measurement of D_A and D_I in vivo requires biophysics expertise (FRAP, FCS) rarely available in developmental biology labs.\n",
      "translation_table": [
        {
          "field_a_term": "activator A (short-range self-activation, slow diffusion D_A)",
          "field_b_term": "melanophore / Sox9 transcription factor / plant biomass (biology)"
        },
        {
          "field_a_term": "inhibitor I (long-range lateral inhibition, fast diffusion D_I)",
          "field_b_term": "xanthophore / BMP signal / soil water (biology)"
        },
        {
          "field_a_term": "Turing wavenumber k* = (f_A/D_A)^{1/2}",
          "field_b_term": "inverse of pattern wavelength (stripe spacing, digit spacing)"
        },
        {
          "field_a_term": "diffusion ratio D_I/D_A >> 1 (Turing instability condition)",
          "field_b_term": "requirement for local activation + lateral inhibition in morphogenesis"
        },
        {
          "field_a_term": "linear stability eigenvalue crossing zero (bifurcation)",
          "field_b_term": "symmetry-breaking transition from homogeneous to patterned developmental field"
        },
        {
          "field_a_term": "pattern wavelength λ ∝ √(D_A/f_A)",
          "field_b_term": "species-specific scaling of anatomical features (limb digit count)"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rstb.1952.0012",
          "note": "Turing (1952) Philos Trans R Soc B 237:37 — chemical basis of morphogenesis"
        },
        {
          "doi": "10.1038/376765a0",
          "note": "Kondo & Asai (1995) Nature 376:765 — reaction-diffusion wave on the skin of Pomacanthus"
        },
        {
          "doi": "10.1126/science.1226804",
          "note": "Sheth et al. (2012) Science 338:1476 — Hox genes regulate digit patterning by controlling Turing mechanism"
        },
        {
          "doi": "10.1126/science.1104284",
          "note": "Rietkerk et al. (2004) Science 305:1926 — self-organized patchiness and catastrophic shifts in ecosystems"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-physics/b-developmental-turing-instability.yaml"
    },
    {
      "id": "b-dna-mechanics-chromatin",
      "title": "DNA as a semiflexible polymer (persistence length l_p ≈ 50 nm, worm-like chain model) and chromatin loop extrusion by cohesin/CTCF generating topologically associating domains bridges polymer physics and structural biology to explain 3D genome organization and gene regulation.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "DNA is a semiflexible polymer characterized by its persistence length l_p ≈ 50 nm (150 bp) — the length scale over which thermal fluctuations bend the molecule by ~1 radian. At scales shorter than l_p, DNA behaves as a rigid rod; at scales much longer than l_p, it is a random coil with ⟨r²⟩ = 2l_p·L",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-tad-boundary-disruption-ctcf-site-oncogene-activation-quantitative"
      ],
      "communication_gap": "DNA polymer physics (published in J Chem Phys, Macromolecules, Biophys J) uses Kratky-Porod model and path integral formulations inaccessible to most molecular biologists. Molecular biologists doing chromatin biology (Cell, Nature, Science) rarely engage with the polymer physics literature. The topological language (writhe, linking number, White-Calugareanu) requires training in knot theory that most biologists lack. Hi-C data analysis borrows from statistical physics (mean-field compartment models) but the connection is rarely made explicit. Loop extrusion theory was proposed by polymer physicists (Alipour-Marko 2012) but only adopted by cell biologists after direct experimental visualization.\n",
      "translation_table": [
        {
          "field_a_term": "DNA persistence length l_p = 50 nm",
          "field_b_term": "bending stiffness κ = k_BT·l_p in Kratky-Porod WLC Hamiltonian"
        },
        {
          "field_a_term": "mean-squared end-to-end distance ⟨r²⟩ = 2l_p·L (WLC)",
          "field_b_term": "diffusion of a random walker: ⟨r²⟩ = 2D·t with D = v·l_p"
        },
        {
          "field_a_term": "linking number Lk (topological invariant of closed DNA)",
          "field_b_term": "winding number / topological charge in field theory"
        },
        {
          "field_a_term": "writhe Wr = Lk - Tw (supercoiling)",
          "field_b_term": "geometric phase (Berry phase analog) of a polymer"
        },
        {
          "field_a_term": "topoisomerase II (changes Lk by ±2)",
          "field_b_term": "topology-changing operator in knot theory"
        },
        {
          "field_a_term": "nucleosome bending energy ≈ 40 kBT",
          "field_b_term": "activation barrier in a statistical mechanical system"
        },
        {
          "field_a_term": "cohesin loop extrusion (CTCF boundaries)",
          "field_b_term": "active (non-equilibrium) polymer loop formation by molecular motors"
        },
        {
          "field_a_term": "topologically associating domain (TAD)",
          "field_b_term": "phase-separated compartment in a block copolymer system"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.265.5178.1599",
          "note": "Bustamante et al. (1994) Science 265:1599 — entropic elasticity of λ-phage DNA (WLC experimental validation)"
        },
        {
          "doi": "10.2307/2373093",
          "note": "White (1969) Am J Math 91:693 — self-linking and the Gauss integral in higher dimensions (Lk=Tw+Wr)"
        },
        {
          "doi": "10.1016/j.celrep.2016.04.085",
          "note": "Fudenberg et al. (2016) Cell Rep 15:2038 — formation of chromosomal domains by loop extrusion"
        },
        {
          "doi": "10.1126/science.295.5558.1306",
          "note": "Dekker et al. (2002) Science 295:1306 — capturing chromosome conformation (Hi-C precursor)"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-physics/b-dna-mechanics-chromatin.yaml"
    },
    {
      "id": "b-hair-cell-bundle-x-hopf-bifurcation",
      "title": "Hair cell bundle x Hopf bifurcation — auditory amplification at the edge of oscillation\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The inner ear hair cell bundle operates at a Hopf bifurcation point, producing active mechanical amplification with a characteristic 1/3 power compression and sharp frequency selectivity; this is the same nonlinear dynamics as a laser at threshold and is responsible for the ear's extraordinary sensi",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Auditory neuroscientists measuring hair cell mechanics and physicists studying nonlinear oscillator theory developed parallel frameworks; the identification of the Hopf bifurcation as the operating principle of hearing occurred in 2000 and cross-disciplinary collaboration between biophysicists and nonlinear dynamicists remains limited.\n",
      "translation_table": [
        {
          "field_a_term": "hair cell bundle spontaneous oscillation (neuroscience)",
          "field_b_term": "limit cycle near Hopf bifurcation point (nonlinear dynamics)",
          "note": "The bundle oscillates spontaneously below threshold; this is the hallmark of a system at a supercritical Hopf bifurcation"
        },
        {
          "field_a_term": "1/3 power compression in auditory response (auditory neuroscience)",
          "field_b_term": "cube-root response of driven Hopf oscillator (dynamical systems)",
          "note": "Driven at a Hopf bifurcation, amplitude scales as stimulus^(1/3) — the mathematical origin of auditory compression"
        },
        {
          "field_a_term": "frequency selectivity / Q factor (auditory physics)",
          "field_b_term": "sharpness of resonance at bifurcation (nonlinear dynamics)",
          "note": "The Hopf bifurcation maximally sharpens frequency tuning while maintaining amplitude sensitivity simultaneously"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.97.7.3183",
          "note": "Camalet et al. (2000) - Auditory sensitivity provided by self-tuned critical oscillations of hair cells; PNAS 97:3183"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-physics/b-hair-cell-bundle-x-hopf-bifurcation.yaml"
    },
    {
      "id": "b-hair-cells-mechanosensory-biophysics",
      "title": "Inner ear hair cells bridge biology and physics: tip-link gating springs open mechanotransduction channels with Boltzmann-distributed open probability, and spontaneous otoacoustic emissions reveal operation near a Hopf bifurcation providing active amplification at the thermodynamic limit.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Inner hair cells (IHCs, ~3,500 per human cochlea) transduce basilar membrane vibration into auditory nerve signals. The mechanotransduction (MET) channel is gated by tip links (cadherin-23/protocadherin-15 heterodimers) connecting adjacent stereocilia. Channel open probability follows a two-state Bo",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-hopf-bifurcation-universal-mechanism-vertebrate-hair-cell-amplification"
      ],
      "communication_gap": "Hair cell biologists who characterize MET currents electrophysiologically rarely engage with the nonlinear dynamics literature on Hopf bifurcations. Physicists who develop the theory of active amplification and bifurcation physics rarely interface with the molecular biology of tip-link proteins. The unification of molecular mechanism and dynamical systems theory is incomplete.\n",
      "translation_table": [
        {
          "field_a_term": "tip link (cadherin-23 / protocadherin-15)",
          "field_b_term": "gating spring with stiffness K_GS (classical spring mechanics)",
          "note": "tip link tension directly opens the MET channel via gating spring model"
        },
        {
          "field_a_term": "MET channel open probability P_o",
          "field_b_term": "two-state Boltzmann distribution (same as voltage-gated channels)",
          "note": "statistical mechanics describes channel gating; thermal fluctuations set sensitivity limit"
        },
        {
          "field_a_term": "spontaneous hair bundle oscillation",
          "field_b_term": "nonlinear oscillator near a supercritical Hopf bifurcation",
          "note": "active amplification without resonance; bifurcation physics determines gain and tuning"
        },
        {
          "field_a_term": "spontaneous otoacoustic emission (SOAE)",
          "field_b_term": "coherent sound emission from a driven nonlinear oscillator",
          "note": "the ear emits sound — measurable physical evidence of active mechanical amplification"
        },
        {
          "field_a_term": "basilar membrane compressive nonlinearity (40 dB dynamic range)",
          "field_b_term": "power-law gain compression near Hopf bifurcation: G ∝ F^{-2/3}",
          "note": "explains why loudness grows as ~30th root of intensity (Stevens power law)"
        }
      ],
      "references": [
        {
          "doi": "10.1038/341397a0",
          "note": "Hudspeth (1989) How the ear's works work; Nature 341:397"
        },
        {
          "doi": "10.1152/physrev.00026.2013",
          "note": "Fettiplace & Kim (2014) The physiology of mechanoelectrical transduction channels in hearing; Physiol Rev 94:951"
        },
        {
          "doi": "10.1073/pnas.261510898",
          "note": "Martin et al. (2001) Comparison of a hair bundle's spontaneous oscillations with its response to mechanical stimulation; PNAS 98:14380"
        },
        {
          "doi": "10.1523/JNEUROSCI.03-05-00962.1983",
          "note": "Corey & Hudspeth (1983) Kinetics of the receptor current in bullfrog saccular hair cells; J Neurosci 3:962"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-physics/b-hair-cells-mechanosensory-biophysics.yaml"
    },
    {
      "id": "b-intrinsically-disordered-proteins-polymer-physics",
      "title": "Intrinsically disordered proteins (IDPs) are polyelectrolyte chains whose conformational ensemble follows Flory polymer scaling: radius of gyration Rg ~ N^ν with ν≈0.59 (good solvent) for highly charged IDPs",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Intrinsically disordered proteins (IDPs) lack a stable folded structure and exist as dynamic conformational ensembles. Polymer physics provides the quantitative framework: for a chain of N residues with persistence length l_p and excluded volume interactions, the radius of gyration Rg scales as Rg ~",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-intrinsically-disordered-proteins-polymer-physics"
      ],
      "communication_gap": "IDP biologists characterize sequences using charge and hydrophobicity plots without connecting to the polymer physics literature where Flory scaling, polyelectrolyte theory, and coil-to-globule transitions are quantitatively developed. Polymer physicists have the theoretical framework but IDP experimental data (smFRET, SAXS time series, condensate phase diagrams) are largely in biology journals not read by polymer scientists.\n",
      "translation_table": [
        {
          "field_a_term": "IDP amino acid sequence (primary structure)",
          "field_b_term": "sequence of monomer types with defined charge, hydrophobicity, stiffness",
          "note": "NCPR (net charge per residue) and hydropathy score determine the polymer universality class"
        },
        {
          "field_a_term": "IDP radius of gyration Rg from SAXS/smFRET",
          "field_b_term": "Flory Rg = b·N^ν with fitted Flory exponent ν",
          "note": "Single-molecule FRET gives Rg in different salt concentrations, testing polyelectrolyte theory"
        },
        {
          "field_a_term": "IDP collapse upon binding a partner (disorder-to-order transition)",
          "field_b_term": "coil-to-globule transition driven by attractive interactions (poor solvent regime)",
          "note": "Binding energy drives ν from 0.6 toward 1/3, shrinking the conformational ensemble"
        },
        {
          "field_a_term": "IDP phase separation into biomolecular condensates",
          "field_b_term": "liquid-liquid phase separation of a polymer solution (Flory-Huggins theory)",
          "note": "IDP condensates form when χ parameter (interaction strength) exceeds critical value"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nsmb.2018",
          "note": "Uversky (2013) Intrinsically disordered proteins and novel drug discovery. Expert Opin Drug Discov 7:475"
        },
        {
          "doi": "10.1021/acs.biochem.7b00786",
          "note": "Holehouse & Pappu (2018) Functional implications of intracellular phase transitions. Biochemistry 57:2415"
        },
        {
          "doi": "10.1073/pnas.0802203105",
          "note": "Müller-Späth et al. (2010) Charge interactions can dominate coupled folding and binding of two disordered proteins. PNAS 107:14609"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-physics/b-intrinsically-disordered-proteins-polymer-physics.yaml"
    },
    {
      "id": "b-mechanosensing-x-force-transduction",
      "title": "Mechanosensing ↔ Force transduction — cell stiffness as Hookean spring network",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Cells sense substrate stiffness via integrin-mediated focal adhesions that behave as Hookean spring networks; the cell's cytoskeletal prestress tunes its resonant frequency to match substrate rigidity, implementing a mechanical impedance-matching circuit analogous to transmission line theory in elec",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-mechanosensing-x-force-transduction"
      ],
      "communication_gap": "Cell biology and solid mechanics evolved in separate communities: biologists studying integrin signalling focused on biochemical cascades (FAK, Src kinases), while physicists and engineers studying elastic networks focused on material properties. The synthesis required biophysicists (Discher, Bhanu Bhanu Bhanu Janmey) who spanned both communities and performed traction force microscopy experiments to directly measure cell-matrix mechanical coupling.",
      "translation_table": [
        {
          "field_a_term": "focal adhesion complex (integrin clusters linking ECM to cytoskeleton)",
          "field_b_term": "mechanical spring network node (force transduction junction)",
          "note": "Focal adhesions are the compliance-sensing elements; stiffness is read via force/displacement ratio"
        },
        {
          "field_a_term": "cytoskeletal prestress (actomyosin tension in F-actin network)",
          "field_b_term": "pre-tension in spring network (sets resonant frequency)",
          "note": "Prestress tunes cell stiffness; stiffer substrates recruit more actomyosin"
        },
        {
          "field_a_term": "substrate rigidity (Young's modulus E of extracellular matrix)",
          "field_b_term": "spring constant k of Hookean substrate spring",
          "note": "E ranges from 0.1 kPa (brain) to 40 kPa (bone); cell differentiates accordingly"
        },
        {
          "field_a_term": "durotaxis — cell migration toward stiffer substrate",
          "field_b_term": "mechanical impedance matching — system tunes to match load",
          "note": "Cells preferentially adhere and migrate on substrates matching their own stiffness"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1116995",
          "note": "Discher, Janmey & Wang (2005) — tissue cells feel and respond to the stiffness of their substrate; Science 310:1139"
        },
        {
          "doi": "10.1016/j.cell.2006.06.044",
          "note": "Engler et al. (2006) — matrix elasticity directs stem cell lineage specification; Cell 126:677"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-physics/b-mechanosensing-x-force-transduction.yaml"
    },
    {
      "id": "b-membrane-curvature-vesicle-formation",
      "title": "Lipid membrane shapes — from red blood cell discocytes to endocytic vesicles — are governed by the Helfrich bending energy functional, connecting elastic continuum mechanics to cell biology and protein-sculpted membrane remodelling.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Lipid bilayer membranes resist bending with bending modulus κ ≈ 10–20 k_BT. The Helfrich bending energy is F = ½κ∫(2H − c₀)²dA + κ_G∫K dA, where H is the mean curvature, K is the Gaussian curvature, c₀ is the spontaneous curvature (set by membrane asymmetry), and κ_G is the Gaussian modulus. Minimiz",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-bar-domain-curvature-sensing-threshold"
      ],
      "communication_gap": "Helfrich's 1973 paper appeared in a biophysics journal but was largely ignored by cell biologists for two decades. The experimentally accessible prediction (vesicle shape diagrams) was confirmed by Seifert's group in the 1990s, but the connection to membrane trafficking proteins only became appreciated with the discovery of BAR domains (McMahon & Gallop 2005). Cell biologists continue to describe endocytosis mechanistically without invoking the elastic mechanics framework that unifies these processes.\n",
      "translation_table": [
        {
          "field_a_term": "mean curvature H = ½(κ₁ + κ₂)",
          "field_b_term": "local membrane shape at each point of the bilayer"
        },
        {
          "field_a_term": "spontaneous curvature c₀",
          "field_b_term": "membrane compositional asymmetry / BAR domain curvature preference"
        },
        {
          "field_a_term": "bending modulus κ (J)",
          "field_b_term": "membrane stiffness determining vesicle fluctuation amplitude"
        },
        {
          "field_a_term": "Gaussian curvature K = κ₁κ₂",
          "field_b_term": "topological curvature — integral over closed surface = 4π (Gauss-Bonnet)"
        },
        {
          "field_a_term": "area-difference elasticity (ADE) model",
          "field_b_term": "RBC shape diagram parameterized by reduced volume and area difference"
        },
        {
          "field_a_term": "stomatocyte↔discocyte↔echinocyte transitions",
          "field_b_term": "osmotic stress responses and disease-related RBC morphology changes"
        },
        {
          "field_a_term": "critical radius for tubulation (R_c = √(κ/2σ))",
          "field_b_term": "minimum neck radius for vesicle budding or membrane tubule formation"
        }
      ],
      "references": [
        {
          "note": "Helfrich (1973) Elastic properties of lipid bilayers. Z Naturforsch 28c:693"
        },
        {
          "doi": "10.1080/00018739700101488",
          "note": "Seifert (1997) Configurations of fluid membranes and vesicles. Adv Phys 46:13"
        },
        {
          "doi": "10.1038/nrm1784",
          "note": "Zimmerberg & Kozlov (2006) How proteins produce cellular membrane curvature. Nat Rev Mol Cell Biol 7:9"
        },
        {
          "doi": "10.1038/s41467-019-10346-6",
          "note": "Bhaskara et al. (2019) Curvature induction and membrane remodeling by FAM134B. Nat Commun 10:2347"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-physics/b-membrane-curvature-vesicle-formation.yaml"
    },
    {
      "id": "b-membrane-tension-x-laplace-pressure",
      "title": "Cell membrane tension x Laplace pressure — Young-Laplace equation in biology\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The pressure difference across a curved cell membrane is given by the Young-Laplace equation delta_P = 2 * gamma / R (for spherical cells), where gamma is cortical tension; this governs cell shape during division, bleb formation, and tissue surface tension in embryogenesis — the same physics as soap",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Cell biologists studying cortical mechanics and physicists studying capillarity and surface tension developed parallel quantitative frameworks; the DAH (Steinberg 1963) applied surface tension concepts to tissues but quantitative measurement of cortical tension by atomic force microscopy only became routine in the 2000s, accelerating cross-disciplinary collaboration.\n",
      "translation_table": [
        {
          "field_a_term": "cortical tension gamma (cell biology)",
          "field_b_term": "surface tension gamma in Young-Laplace equation (physics)",
          "note": "Cortical actomyosin tension in cells is the biological analog of surface tension; both resist membrane curvature"
        },
        {
          "field_a_term": "cell bleb nucleation (cell biology)",
          "field_b_term": "bubble nucleation at surface tension minimum (physics)",
          "note": "Blebbing occurs when cortical tension drops locally, causing Laplace pressure to drive membrane protrusion"
        },
        {
          "field_a_term": "tissue surface tension in embryogenesis (developmental biology)",
          "field_b_term": "interfacial tension between immiscible fluids (physics)",
          "note": "Differential adhesion hypothesis (DAH) describes tissues as fluids; tissue-tissue interfacial tension follows Young's equation"
        },
        {
          "field_a_term": "cytokinesis furrow ingression (cell biology)",
          "field_b_term": "pinch-off of a fluid thread by surface tension (physics)",
          "note": "The Rayleigh-Plateau instability governs the dynamics of cytokinetic ring ingression and final abscission"
        }
      ],
      "references": [
        {
          "doi": "10.1016/j.cub.2013.05.044",
          "note": "Salbreux, Charras & Paluch (2012) - Actin cortex mechanics and cellular morphogenesis; Trends Cell Biol 22:536"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-physics/b-membrane-tension-x-laplace-pressure.yaml"
    },
    {
      "id": "b-metabolic-scaling-fractal-vasculature",
      "title": "Kleiber's 3/4-power metabolic scaling law (B ~ M^{3/4}) across animals spanning 27 orders of magnitude in body mass is derived from the fractal geometry of space-filling vascular networks: West, Brown & Enquist (1997) proved that the 4/3 exponent arises necessarily from the constraint that hierarchical branching networks minimise hydrodynamic resistance while filling volume fractally.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "West, Brown & Enquist (1997) derived Kleiber's law from three assumptions: (1) the vascular network is a self-similar fractal with branching ratio n_b, (2) the terminal units (capillaries/leaf stomata) are size-invariant, (3) the network minimises the total hydrodynamic resistance (evolutionary opti",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-metabolic-exponent-network-dimension-prediction"
      ],
      "communication_gap": "Physiologists measuring metabolic rates and network engineers studying fractal branching rarely read the same literature. The West-Brown-Enquist paper (1997 Science) sparked considerable controversy in physiology journals; the derivation is more widely accepted among network theorists than among comparative physiologists who dispute the universality of the 3/4 exponent.\n",
      "translation_table": [
        {
          "field_a_term": "Kleiber's law B ~ M^{3/4} (metabolic rate vs. body mass)",
          "field_b_term": "Allometric exponent = d/(d+1) from fractal network geometry",
          "note": "3D space-filling network gives 3/4; 2D network gives 2/3; surface-limited gives 2/3"
        },
        {
          "field_a_term": "Vascular branching: artery → capillary hierarchy",
          "field_b_term": "Self-similar fractal branching network (Horton-Strahler order)",
          "note": "Each branching level preserves the volume-filling constraint; radius ratio = n_b^{-1/3}"
        },
        {
          "field_a_term": "Capillary size (invariant across species)",
          "field_b_term": "Fixed terminal unit of the fractal: size-invariant boundary condition",
          "note": "Capillary diameter ~5 μm, flow rate ~1 mm/s invariant from mouse to whale"
        },
        {
          "field_a_term": "Minimum hydrodynamic resistance (Murray's law: r₃ = Σrᵢ³)",
          "field_b_term": "Optimal transport network (Wasserstein/minimum cost-flow)",
          "note": "Murray's law is the optimality condition; equivalent to minimum cost branching in optimal transport"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.276.5309.122",
          "note": "West, Brown & Enquist (1997) Science – a general model for the origin of allometric scaling laws; fractal vasculature"
        },
        {
          "doi": "10.1126/science.284.5420.1677",
          "note": "Banavar et al. (1999) Science – size and form in efficient transportation networks"
        },
        {
          "doi": "10.1086/303427",
          "note": "Enquist et al. – allometric scaling of plant energetics and population density"
        },
        {
          "doi": "10.1038/35009076",
          "note": "Dreyer & Puzio (2001) – test of the WBE model; fractal geometry and Kleiber's law"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-physics/b-metabolic-scaling-fractal-vasculature.yaml"
    },
    {
      "id": "b-metabolic-scaling-x-fractal-transport",
      "title": "West–Brown–Enquist style metabolic scaling links whole-organism metabolic rate to fractal-like transport network geometry, connecting Kleiber’s 3/4 observation to space-filling resource delivery.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Metabolic scaling laws relate resting metabolic rate B to body mass M as a power law B ∝ M^α with α often near 3/4 across taxa. The WBE theory explains this exponent via hierarchical branching networks that are area-preserving and space-filling, yielding a dimensionally consistent scaling of effecti",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-kleiber-exponent-from-fractal-like-transport-networks"
      ],
      "communication_gap": "Empirical biologists debate the universality of 3/4; physicists and theorists emphasize geometric derivations. The two literatures intersect in reviews but rarely share falsifiable geometric datasets.",
      "translation_table": [
        {
          "field_a_term": "branching ratio and length scaling",
          "field_b_term": "vessel generations and terminal exchange units"
        },
        {
          "field_a_term": "space-filling constraint",
          "field_b_term": "tissue perfusion requirements across 3D volumes"
        },
        {
          "field_a_term": "effective transport dimension",
          "field_b_term": "empirical scaling exponent α"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.284.5420.1677",
          "note": "West, Brown & Enquist (1999) — metabolic scaling from fractal-like transport"
        },
        {
          "doi": "10.1038/35013188",
          "note": "West, Brown & Enquist (1999) Nature companion/related scaling discussion (canonical WBE cluster)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-physics/b-metabolic-scaling-x-fractal-transport.yaml"
    },
    {
      "id": "b-morphogenesis-mechanical-forces",
      "title": "Tissue morphogenesis — the shaping of embryos and organs — is driven by mechanical forces (surface tension, actomyosin contractility, elastic buckling) governed by the same physical laws as soft condensed matter, bridging cell biology to continuum mechanics and explaining how cells collectively sculpture 3D anatomy from a flat sheet.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The differential adhesion hypothesis (Steinberg 1963): tissues sort like immiscible liquids because cells maximise adhesion energy by segregating into phases. Cell surface tension γ_AB = (W_AA + W_BB)/2 - W_AB, where W_ij is the work of adhesion between cell types i and j. Tissues with higher cohesi",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-vertex-model-cortical-gyrification-mechanics"
      ],
      "communication_gap": "Developmental biology emphasised molecular signals (Wnt, Shh, BMP pathways) for 40 years after the molecular biology revolution, treating mechanics as secondary. Biophysicists (Ingber, Discher, Fletcher) developed cell mechanics in parallel using physics instrumentation (AFM, optical traps, micropipettes) but rarely collaborated with developmental biologists. Steinberg's DAH (1963) was controversial for 40 years because the molecular mechanism (cadherin differential expression) was not identified until 1986. The vertex model and active matter frameworks for tissues emerged from soft condensed matter physics (Marchetti group, Bi et al.) and are still not taught in standard cell biology courses. Developmental biologists and physicists attend different meetings and publish in mostly separate journals.\n",
      "translation_table": [
        {
          "field_a_term": "cell-cell adhesion energy W_ij",
          "field_b_term": "surface energy in liquid-liquid interface (interfacial tension)"
        },
        {
          "field_a_term": "actomyosin cortical tension T_cortex",
          "field_b_term": "active surface tension of a self-propelled interface"
        },
        {
          "field_a_term": "tissue sorting (Steinberg, cells of type A engulf type B)",
          "field_b_term": "Rayleigh-Taylor instability (denser fluid below lighter → stable)"
        },
        {
          "field_a_term": "vertex model cell energy E = K(A-A₀)² + ΓP²",
          "field_b_term": "elastic energy of a 2D foam / network (soft matter physics)"
        },
        {
          "field_a_term": "shape index p₀ = P₀/√A₀ (rigidity transition marker)",
          "field_b_term": "packing fraction in jamming transition (particulate matter)"
        },
        {
          "field_a_term": "cortical buckling (gyrification)",
          "field_b_term": "Euler buckling of a thin elastic plate under compression"
        },
        {
          "field_a_term": "T1 transition (cell neighbour exchange)",
          "field_b_term": "plastic rearrangement event in amorphous solid"
        },
        {
          "field_a_term": "Young-Laplace pressure ΔP = 2γ/R across tissue interface",
          "field_b_term": "capillary pressure across liquid-gas interface"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.141.3579.401",
          "note": "Steinberg (1963) Science 141:401 — differential adhesion hypothesis"
        },
        {
          "doi": "10.1016/0022-5193(83)90161-4",
          "note": "Honda (1983) J Theor Biol 106:423 — vertex model of epithelial sheets"
        },
        {
          "doi": "10.1016/j.cub.2007.11.049",
          "note": "Farhadifar et al. (2007) Curr Biol 17:2095 — vertex model mechanics"
        },
        {
          "doi": "10.1038/nphys3655",
          "note": "Tallinen et al. (2016) Nat Phys 12:588 — cortical folding by differential growth"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-physics/b-morphogenesis-mechanical-forces.yaml"
    },
    {
      "id": "b-morphogenesis-x-mechanical-instability",
      "title": "Morphogenesis ↔ Mechanical instability — tissue folding as Euler buckling",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Brain cortical folding, gut villus formation, and lung branching morphogenesis all arise from compressive mechanical instabilities (Euler buckling, Rayleigh-Taylor instability) in elastic sheets; gyrification depth and fold spacing are predicted by the bilayer bending modulus ratio and confinement g",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-morphogenesis-x-mechanical-instability"
      ],
      "communication_gap": "Developmental biology studied morphogenesis through molecular signalling (Turing patterns, Wnt, BMP gradients) while mechanics of elastic buckling was developed in structural engineering. The prediction that pure mechanical instability could generate brain folding patterns — without molecular pre-patterning — was resisted by molecular biologists. The mechanical morphogenesis paradigm gained traction only after Tallinen et al. (2016) showed a physically realistic gel brain model develops human-like gyrification patterns.",
      "translation_table": [
        {
          "field_a_term": "cortical folding / gyrification (brain surface morphology)",
          "field_b_term": "Euler buckling of a compressed elastic bilayer (mechanics)",
          "note": "Cortex (stiff layer, μ₁) + subcortex (soft layer, μ₂) buckles when μ₁/μ₂ > threshold"
        },
        {
          "field_a_term": "fold spacing λ (distance between adjacent gyri)",
          "field_b_term": "most unstable wavelength λ* of Euler buckling mode",
          "note": "λ* = 2πh(μ₁/3μ₂)^(1/3) where h is cortex thickness — matches observed gyrification"
        },
        {
          "field_a_term": "villus/crypt periodicity in intestinal mucosa",
          "field_b_term": "Rayleigh-Taylor instability wavelength in two-layer elastic system",
          "note": "Gut epithelial layer (stiff) on submucosa (soft) buckles under growth-induced compression"
        },
        {
          "field_a_term": "lissencephaly (smooth brain) — genetic mutation reducing cortex stiffness",
          "field_b_term": "buckling suppression when stiffness ratio μ₁/μ₂ < critical value",
          "note": "LIS1 mutations reduce cortex growth rate, preventing compression needed for buckling"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nphys3632",
          "note": "Tallinen et al. (2016) — On the growth and form of cortical convolutions; Nature Phys 12:588"
        },
        {
          "doi": "10.1073/pnas.1406015111",
          "note": "Tallinen & Biggins (2015) — mechanics of convoluted epithelial monolayers; PNAS"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-physics/b-morphogenesis-x-mechanical-instability.yaml"
    },
    {
      "id": "b-muscle-crossbridge-sliding-filament",
      "title": "Muscle force generation is a stochastic cross-bridge cycle: Huxley's rate equations for myosin attachment/detachment map onto a driven Markov chain whose ensemble average gives the force-velocity curve",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Huxley (1957) sliding filament model describes myosin head binding to actin as a continuous-time Markov process: a myosin head at position x relative to the nearest actin site transitions from unbound to bound at rate f(x) and back at rate g(x), with x changing at the sliding velocity v. Each bo",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-muscle-crossbridge-sliding-filament"
      ],
      "communication_gap": "Muscle physiologists derive the Hill curve empirically and use Huxley's rate equations without connecting to the broader framework of molecular motors and non-equilibrium statistical mechanics. The mathematical equivalence between muscle cross-bridge theory and Brownian ratchet models was established in the 1990s biophysics literature but is not standard teaching in physiology.\n",
      "translation_table": [
        {
          "field_a_term": "myosin cross-bridge power stroke (biophysics)",
          "field_b_term": "forward transition in asymmetric Markov chain consuming one ATP",
          "note": "ATP hydrolysis provides the free energy that makes the chain non-equilibrium (breaks detailed balance)"
        },
        {
          "field_a_term": "Hill force-velocity curve F(v)",
          "field_b_term": "current-force relationship of a driven non-equilibrium Markov motor",
          "note": "Both show the same hyperbolic form; parameters determined by rate constants f(x), g(x)"
        },
        {
          "field_a_term": "isometric force F₀ (at v=0)",
          "field_b_term": "stall force of molecular motor — maximum force at zero velocity",
          "note": "Stall force is reached when ATP free energy exactly balances mechanical work done"
        },
        {
          "field_a_term": "twitch tension transients after length step",
          "field_b_term": "non-equilibrium relaxation of the Markov chain to new steady state",
          "note": "Multi-exponential relaxation reflects multiple time constants in f(x), g(x) rate functions"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0301-4622(57)90060-4",
          "note": "Huxley AF (1957) Muscle structure and theories of contraction. Prog Biophys Biophys Chem 7:255"
        },
        {
          "doi": "10.1098/rspb.1938.0050",
          "note": "Hill AV (1938) The heat of shortening and the dynamic constants of muscle. Proc R Soc B 126:136"
        },
        {
          "doi": "10.1126/science.271.5245.70",
          "note": "Finer et al. (1994) Single myosin molecule mechanics — piconewton forces and nanometre steps. Nature 368:113"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-physics/b-muscle-crossbridge-sliding-filament.yaml"
    },
    {
      "id": "b-muscle-mechanics-x-crossbridge-theory",
      "title": "Muscle Mechanics x Crossbridge Theory - force-velocity as stochastic motor ensemble\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Muscle force-velocity relationship (Hill equation: (F+a)(v+b)=const) emerges from the stochastic attachment-detachment kinetics of millions of myosin crossbridges; Huxley's 1957 sliding filament model is a master equation for crossbridge state transitions whose mean-field solution recovers Hill's ph",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Hill (1938) derived his force-velocity equation phenomenologically from thermodynamic arguments; Huxley (1957) provided the stochastic mechanistic model; the two were linked only gradually through mean-field analysis, and many muscle biologists still use Hill's equation without connecting it to crossbridge master equation foundations.\n",
      "translation_table": [
        {
          "field_a_term": "Myosin crossbridge (attached/detached state cycle)",
          "field_b_term": "Two-state Markov chain (attachment rate f, detachment rate g)",
          "note": "Each myosin head cycles between detached (state 1) and attached (state 2) states with position-dependent rates f(x) and g(x); the probability distribution n(x,t) over crossbridge positions follows the Huxley master equation, a transport PDE.\n"
        },
        {
          "field_a_term": "Muscle force (pN per crossbridge, N per muscle)",
          "field_b_term": "Mean-field average of Markov chain force",
          "note": "Total force is the sum of individual crossbridge forces (each ~5 pN); mean-field averaging over the crossbridge position distribution gives macroscopic force from microscopic Markov chain statistics.\n"
        },
        {
          "field_a_term": "Hill equation a/F_0 = b/(v_max) = hyperbolic constant",
          "field_b_term": "Mean-field steady-state of Huxley master equation",
          "note": "The hyperbolic Hill equation is the exact mean-field solution of the Huxley master equation under specific rate function choices; it is the law of large numbers applied to crossbridge ensemble kinetics.\n"
        },
        {
          "field_a_term": "Force-clamp step response (mechanical transient)",
          "field_b_term": "Relaxation of Markov chain to new steady state",
          "note": "After a sudden length step, force recovers through rapid crossbridge re-equilibration; the multiple time constants of recovery correspond to eigenvalues of the Huxley master equation Jacobian.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rspb.1957.0045",
          "note": "Huxley (1957) - muscle structure and theories of contraction; Prog Biophys 7:255"
        },
        {
          "doi": "10.1038/s41592-018-0227-9",
          "note": "Sweeney & Holzbaur (2016) - motor proteins; annual review integrating crossbridge and stochastic theory"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-physics/b-muscle-mechanics-x-crossbridge-theory.yaml"
    },
    {
      "id": "b-myosin-motor-x-brownian-ratchet",
      "title": "Myosin motor protein x Brownian ratchet - ATP hydrolysis as rectified diffusion\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Myosin II uses ATP hydrolysis to rectify Brownian thermal fluctuations into directed mechanical work via a Brownian ratchet mechanism; the power stroke is not a classical lever but an asymmetric diffusion step biased by chemical potential difference, making myosin a molecular implementation of Feynm",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Physiologists studying muscle mechanics (Huxley sliding filament model 1954) and physicists studying Brownian ratchets (Feynman 1962, Magnasco 1993) developed parallel frameworks for directed motion; the connection between ATP-driven molecular motors and Smoluchowski-Fokker-Planck ratchet theory was synthesized in the 1990s but motor proteins are still often described deterministically in biology textbooks.\n",
      "translation_table": [
        {
          "field_a_term": "myosin power stroke during ATP hydrolysis (biochemistry)",
          "field_b_term": "asymmetric Brownian ratchet potential rectifying diffusion (physics)",
          "note": "The power stroke is a thermally-assisted diffusion step in an asymmetric potential, not a deterministic lever action"
        },
        {
          "field_a_term": "ATP hydrolysis free energy delta_G ~ -50 kJ/mol (biochemistry)",
          "field_b_term": "chemical potential difference driving rectification in ratchet (statistical mechanics)",
          "note": "ATP hydrolysis provides the chemical potential that creates the asymmetric sawtooth potential biasing Brownian motion"
        },
        {
          "field_a_term": "myosin step size ~5-36 nm (single molecule biophysics)",
          "field_b_term": "spatial period of ratchet potential (physics)",
          "note": "The step size corresponds to the spatial period of the asymmetric ratchet; variance in step size reflects diffusive exploration"
        },
        {
          "field_a_term": "muscle force-velocity relationship (physiology)",
          "field_b_term": "load-dependent rectification efficiency of Brownian ratchet (statistical mechanics)",
          "note": "The hyperbolic force-velocity curve (Hill equation) corresponds to the load dependence of ratchet stepping rate"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.94.12.6185",
          "note": "Huxley (1957) - Muscle structure and theories of contraction; revisited with ratchet perspective; Prog Biophys 7:255"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-physics/b-myosin-motor-x-brownian-ratchet.yaml"
    },
    {
      "id": "b-osmotic-pressure-x-viral-capsid",
      "title": "Osmotic pressure x Viral capsid mechanics — genome packaging as pressurization\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Bacteriophage DNA packaging generates internal pressures of 50-100 atm inside the capsid, governed by the same van't Hoff osmotic pressure law that applies to semipermeable membranes; DNA ejection is an osmotically driven pressure-release mechanism.\n",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Membrane biophysics and virology developed as separate fields; the application of osmotic pressure theory to phage DNA packaging was only established with single-molecule experiments in the 2000s.\n",
      "translation_table": [
        {
          "field_a_term": "Osmotic pressure Pi = nRT/V",
          "field_b_term": "DNA packaging pressure inside phage capsid",
          "note": "The confined DNA acts as an ideal osmotic solute; van't Hoff law predicts 50-100 atm matching direct measurements by osmotic suppression experiments.\n"
        },
        {
          "field_a_term": "Semipermeable membrane water flux",
          "field_b_term": "DNA ejection through phage tail channel",
          "note": "Both are pressure-driven transport through a nanoscale channel; the ejection force equals the osmotic pressure times the channel cross-section.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1016/S0006-3495(01)75940-2",
          "note": "Tzlil et al. (2003) — forces and pressures in DNA packaging and release from viral capsids"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-physics/b-osmotic-pressure-x-viral-capsid.yaml"
    },
    {
      "id": "b-photoreceptor-quantum-efficiency-x-photon-statistics",
      "title": "Photoreceptor Quantum Efficiency x Photon Statistics - retinal rod as single-photon detector\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Retinal rod photoreceptors can detect single photons with ~30% quantum efficiency and signal-to-noise ratio that approaches the quantum shot noise limit; the response is stochastic (Poisson-distributed photon arrivals), and the biochemical amplification cascade (one photon triggers 500 cGMP hydrolyz",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Quantum optics and visual neuroscience developed independently; Hecht, Shlaer & Pirenne (1942) established that humans can detect 5-7 photons in the 1940s; but the connection to quantum detection theory (Glauber coherence, photon statistics, Poisson counting) has not been fully worked out in the visual neuroscience literature.\n",
      "translation_table": [
        {
          "field_a_term": "Retinal rod photoreceptor (single cell, ~2 pA single-photon response)",
          "field_b_term": "Single-photon avalanche diode (SPAD) or photomultiplier tube",
          "note": "A retinal rod generates a ~2 pA current pulse upon absorbing a single photon, with a signal-to-noise ratio (~5:1) that approaches the shot noise limit of ideal single-photon detectors; the quantum efficiency (~25-30%) is comparable to many lab-grade SPADs.\n"
        },
        {
          "field_a_term": "Rhodopsin activation (1 photon -> 1 activated R*)",
          "field_b_term": "Photoelectric effect / photon absorption event",
          "note": "A single photon isomerizes 11-cis retinal to all-trans retinal, activating one rhodopsin molecule - the quantum event analogous to photoelectric emission; the isomerization yield (~67%) sets the intrinsic quantum efficiency.\n"
        },
        {
          "field_a_term": "cGMP cascade amplification (1 R* -> 500 PDE activated -> cGMP drop)",
          "field_b_term": "Avalanche multiplication in SPAD",
          "note": "The G-protein cascade amplifies the single-photon signal by a factor of ~500, similar to the avalanche multiplication factor in a SPAD; the gain is controlled by the lifetime of activated transducin.\n"
        },
        {
          "field_a_term": "Dark noise (spontaneous rhodopsin activation rate, ~0.01/rod/s)",
          "field_b_term": "Dark count rate of single-photon detector",
          "note": "Rods have a dark noise (spontaneous photon-like responses from thermal isomerization of rhodopsin) of ~0.01 per rod per second - a dark count rate comparable to cooled silicon SPADs, limiting detection at very low light levels.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1232602",
          "note": "Rieke & Baylor (1998) - origin of reproducibility in the responses of retinal rods; Biophysical Journal 75:1836"
        },
        {
          "doi": "10.1038/325143a0",
          "note": "Hecht, Shlaer & Pirenne (1942) - energy, quanta and vision; J Gen Physiol 25:819"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-physics/b-photoreceptor-quantum-efficiency-x-photon-statistics.yaml"
    },
    {
      "id": "b-plant-hydraulics-fluid-mechanics",
      "title": "Plant water transport via the cohesion-tension mechanism is governed by Hagen-Poiseuille pipe flow, operating under negative pressures approaching cavitation limits set by fluid physics, with stomatal optimization connecting fluid mechanics to carbon economics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Water transport in plants is driven by the cohesion-tension mechanism (Dixon & Joly 1895): transpiration at leaf surfaces creates a negative pressure (tension) that pulls water columns up from roots through xylem conduits. Hagen-Poiseuille governs xylem flow: Q = πr⁴ΔP/(8ηL), so hydraulic conductanc",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-hydraulic-failure-drives-tree-mortality-drought"
      ],
      "communication_gap": "Plant physiologists who measure vulnerability curves are largely separate from fluid mechanics researchers who study cavitation in engineering materials. The cohesion- tension mechanism remained controversial among physicists until direct pressure-probe measurements in the 1990s (Pockman et al. 1995) confirmed tensions beyond -1 MPa. Climate ecologists who model forest die-off rarely have deep training in plant hydraulics; hydraulics researchers rarely have climate modeling context.\n",
      "translation_table": [
        {
          "field_a_term": "Hagen-Poiseuille conductance K = πr⁴/(8ηL)",
          "field_b_term": "xylem hydraulic conductivity per unit pressure gradient",
          "note": "r⁴ scaling means minor variation in conduit radius has enormous conductance consequences"
        },
        {
          "field_a_term": "cavitation (bubble nucleation in liquid under tension)",
          "field_b_term": "xylem embolism — loss of hydraulic conductivity",
          "note": "Defined by xylem water potential at 50% conductivity loss (P₅₀) — key drought tolerance trait"
        },
        {
          "field_a_term": "negative pressure (tension) in metastable liquid",
          "field_b_term": "leaf water potential ψ_leaf (measured in MPa, negative)",
          "note": "Thermodynamic water potential; plants routinely operate in states forbidden in bulk physics"
        },
        {
          "field_a_term": "Poiseuille's r⁴ dependence",
          "field_b_term": "tradeoff between hydraulic efficiency and safety (wide conduits cavitate more easily)",
          "note": "Wide xylem vessels have high conductance but are more vulnerable to embolism — the safety-efficiency tradeoff"
        },
        {
          "field_a_term": "Cowan-Farquhar optimization (maximize ∫A·dt subject to ∫E·dt = const)",
          "field_b_term": "stomatal conductance g_s as the control variable in carbon-water economy",
          "note": "Variational problem in fluid mechanics (transpiration) constrained by biochemistry (photosynthesis)"
        }
      ],
      "references": [
        {
          "doi": "10.1093/aob/9.5.403",
          "note": "Dixon & Joly (1895) — On the ascent of sap, Ann Bot 9:403; cohesion-tension theory"
        },
        {
          "doi": "10.1038/428851a",
          "note": "Koch et al. (2004) — The limits to tree height, Nature 428:851"
        },
        {
          "doi": "10.1104/pp.88.3.569",
          "note": "Tyree & Sperry (1989) — Vulnerability of xylem to cavitation and embolism, Plant Physiol 88:569"
        },
        {
          "note": "Cowan & Farquhar (1977) — Stomata and the gas exchange, Symp Soc Exp Biol 31:471"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-physics/b-plant-hydraulics-fluid-mechanics.yaml"
    },
    {
      "id": "b-prion-misfolding-nucleation",
      "title": "Prion propagation follows nucleated polymerization kinetics analogous to crystal nucleation, where a critical nucleus of misfolded PrPSc acts as a template for converting native PrPC, with a lag phase duration determined by nucleation rate J proportional to exp(-Delta-G_nuc/kT)",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Prion disease progression follows nucleated polymerization: PrPSc aggregates grow by recruiting and misfolding monomeric PrPC at rate k+, fragment at rate k-, and nucleate de novo at rate J; the sigmoid aggregation kinetics S(t) = 1/(1 + exp(-k_el * (t - t_lag))) match those of crystal nucleation-gr",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Neurologists and prion biologists study infectivity and pathology while statistical physicists study nucleation kinetics; the formal analogy between prion nucleation-polymerization and crystal nucleation is known in biophysics but rarely applied in clinical prion research or drug target identification.",
      "translation_table": [
        {
          "field_a_term": "PrPSc aggregate (misfolded prion protein)",
          "field_b_term": "beta-sheet crystal with infectious template surface",
          "note": "PrPSc is thermodynamically stable beta-sheet polymer; PrPC is metastable alpha-helix form"
        },
        {
          "field_a_term": "prion seeding / transmission",
          "field_b_term": "heterogeneous nucleation: foreign template bypasses nucleation barrier",
          "note": "Exogenous PrPSc lowers effective Delta-G_nuc; analogous to seed crystals bypassing induction period"
        },
        {
          "field_a_term": "disease incubation period (years)",
          "field_b_term": "lag time in nucleated polymerization: t_lag ~ (J * k+)^{-0.5}",
          "note": "Long incubation reflects slow spontaneous nucleation rate J at physiological PrPC concentration"
        },
        {
          "field_a_term": "strain diversity (different PrPSc conformations)",
          "field_b_term": "distinct crystal polymorphs with different template geometries",
          "note": "PrPSc conformation encodes strain; analogous to crystal polymorph selection by nucleation conditions"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.216.4543.136",
          "note": "Prusiner (1982) Science - novel proteinaceous infectious particles (prions) - original discovery"
        },
        {
          "doi": "10.1371/journal.pbio.0050321",
          "note": "Knowles et al. (2009) Science - analytical solution for nucleated polymerization kinetics"
        },
        {
          "doi": "10.1021/nn101555b",
          "note": "Jarrett & Lansbury (1993) Cell - seeding one-dimensional crystallization of amyloid via precursor assembly"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-physics/b-prion-misfolding-nucleation.yaml"
    },
    {
      "id": "b-protein-aggregation-x-nucleation-growth",
      "title": "Protein aggregation ↔ Nucleation-growth kinetics — amyloid as seeded polymerization",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Amyloid fibril formation (in Alzheimer's, Parkinson's, prion diseases) follows secondary nucleation kinetics: monomers add to fibril ends (elongation) and fibril surfaces catalyse new nucleus formation (secondary nucleation); the Knowles-Michaels equation exactly describes these kinetics, enabling r",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-protein-aggregation-x-nucleation-growth"
      ],
      "communication_gap": "Nucleation-growth kinetics was developed for inorganic crystallisation (Volmer & Weber 1926, Becker & Döring 1935) and applied to polymer crystallisation. Amyloid biology was developed in biochemistry and neuroscience focused on protein structure. The Knowles lab (Cambridge) systematically imported nucleation theory into amyloid biology (2009-present), but physical chemists studying crystallisation rarely read amyloid literature, missing the opportunity to apply Avrami theory, Ostwald ripening, and heterogeneous nucleation insights.",
      "translation_table": [
        {
          "field_a_term": "primary nucleation in amyloid formation (de novo nucleus formation from monomers)",
          "field_b_term": "primary nucleation rate J₁ = k₁[m]^(n₁) in classical nucleation theory",
          "note": "n₁ = nucleus size (typically 2-6 monomers); k₁ = primary nucleation rate constant"
        },
        {
          "field_a_term": "elongation (monomer addition to fibril ends)",
          "field_b_term": "crystal growth (monomer addition to crystal surface) in crystal growth theory",
          "note": "Elongation rate v = k₊[m] is linear in monomer concentration; same as Wilson-Frenkel growth law"
        },
        {
          "field_a_term": "secondary nucleation (fibril surface catalyses new nucleus formation)",
          "field_b_term": "heterogeneous nucleation on pre-existing surface in nucleation theory",
          "note": "Secondary nucleation rate J₂ = k₂[m]^(n₂)[fibril mass]^γ; dominates at long times"
        },
        {
          "field_a_term": "seeding experiment (pre-formed fibril fragments accelerate aggregation)",
          "field_b_term": "seeded crystal growth (inoculation with seed crystal bypasses primary nucleation)",
          "note": "Seeding bypasses lag phase by providing pre-formed nucleation surfaces"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1254516",
          "note": "Cohen et al. (2013) — proliferation of amyloid-β42 aggregates via secondary nucleation; PNAS 110:9758"
        },
        {
          "doi": "10.1073/pnas.0910580107",
          "note": "Knowles et al. (2009) — analytical solution of master equation for amyloid polymerisation; Science 326:1533"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-physics/b-protein-aggregation-x-nucleation-growth.yaml"
    },
    {
      "id": "b-protein-folding-energy-landscape",
      "title": "Protein folding as a search on a funneled high-dimensional energy landscape — the same mathematical structure describes spin glass physics, neural network loss landscapes, and optimization",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Protein folding is a search on a high-dimensional energy landscape E(conformation). The \"funnel\" landscape hypothesis (Bryngelson & Wolynes 1987): native proteins have evolved funneled energy landscapes that minimize frustration, ensuring kinetic accessibility of the native state. The folding rate k",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-alphafold-energy-landscape-implicit-learning"
      ],
      "communication_gap": "Protein biophysics and statistical physics of spin glasses developed largely independently despite sharing energy landscape mathematics. The Bryngelson-Wolynes theoretical framework was known to biophysicists but the spin glass connection (Wolynes's explicit analogy) took decades to be widely appreciated. The AlphaFold2 breakthrough was primarily celebrated in structural biology; its connection to energy landscape theory and random matrix theory of co-evolution received less mainstream attention.\n",
      "translation_table": [
        {
          "field_a_term": "energy landscape E(conformation)",
          "field_b_term": "loss landscape L(weights) in neural networks",
          "note": "Both are high-dimensional surfaces; funneled structure ensures efficient minimization"
        },
        {
          "field_a_term": "native state (global energy minimum)",
          "field_b_term": "optimal solution / trained model minimum",
          "note": "Evolution / training drives the system toward the global or low-loss basin"
        },
        {
          "field_a_term": "kinetic trap (local minimum)",
          "field_b_term": "saddle point / sharp local minimum in loss landscape",
          "note": "Both represent stuck states; annealing strategies help escape in both domains"
        },
        {
          "field_a_term": "folding funnel (decreasing entropy toward native state)",
          "field_b_term": "loss funnel (flat wide minima in overparameterized networks)",
          "note": "Wide minima generalize better; flat loss funnels analogize to evolved protein funnels"
        },
        {
          "field_a_term": "frustration (competing interactions preventing folding)",
          "field_b_term": "spin glass phase (replica symmetry breaking)",
          "note": "Random heteropolymers are spin glasses; evolved proteins minimize frustration"
        },
        {
          "field_a_term": "evolutionary co-variation (correlated mutations in MSA)",
          "field_b_term": "learned coupling matrix in AlphaFold2 attention",
          "note": "Co-evolution encodes the energy landscape; AlphaFold2 learns this implicitly"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.84.21.7524",
          "note": "Bryngelson & Wolynes (1987) PNAS 84:7524 — funneled energy landscape and principle of minimal frustration"
        },
        {
          "doi": "10.1038/nsb0197-10",
          "note": "Dill & Chan (1997) Nat Struct Biol 4:10 — review of energy landscape theory of protein folding"
        },
        {
          "doi": "10.1038/s41586-021-03819-2",
          "note": "Jumper et al. (2021) Nature 596:583 — AlphaFold2: structure prediction from evolutionary data"
        },
        {
          "note": "Wales (2003) Energy Landscapes (Cambridge University Press, ISBN 0521814138) — comprehensive treatment of energy landscape theory"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-physics/b-protein-folding-energy-landscape.yaml"
    },
    {
      "id": "b-protein-folding-funnel-energy-landscape",
      "title": "The protein folding funnel model, borrowed from statistical mechanics energy landscape theory, explains how proteins reliably fold to their native state despite Levinthal's paradox: the funnel-shaped free energy landscape biases the search toward the native basin, with entropy and enthalpy competing to carve the funnel.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Energy landscape theory describes protein folding as diffusion on a multidimensional free energy surface F(Q) where Q is the fraction of native contacts. The funnel emerges because native-like contacts are energetically stabilized: F(Q) = E(Q) - TS(Q) where E(Q) ≈ -ε_0·N·Q (native contact energy) an",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-minimal-frustration-principle-de-novo-protein-design"
      ],
      "communication_gap": "Statistical physicists developed energy landscape theory for glasses and disordered systems in the 1980s; biophysicists studying protein folding adopted and adapted these concepts in the 1990s (Wolynes, Onuchic, Dill), creating the funnel framework; the communities now overlap substantially but the full mathematical mapping from spin-glass landscape theory to protein folding remains an active bridge.\n",
      "translation_table": [
        {
          "field_a_term": "protein folding funnel (biophysics)",
          "field_b_term": "free energy landscape with global minimum (statistical mechanics)",
          "note": "Both describe how a complex system finds its lowest free energy state"
        },
        {
          "field_a_term": "native state of protein (biophysics)",
          "field_b_term": "global free energy minimum / ground state (statistical mechanics)",
          "note": "The native fold corresponds to the basin of lowest free energy"
        },
        {
          "field_a_term": "misfolding / kinetic trap (biophysics)",
          "field_b_term": "local energy minimum / metastable state (statistical mechanics)",
          "note": "Kinetically trapped misfolded states correspond to metastable minima on the landscape"
        },
        {
          "field_a_term": "folding transition state ensemble (biophysics)",
          "field_b_term": "saddle point / free energy barrier (statistical mechanics)",
          "note": "The TS ensemble is the set of structures at the free energy maximum along the folding path"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.267.5204.1619",
          "note": "Dill & Chan (1997) - From Levinthal to pathways to funnels (landmark review)"
        },
        {
          "doi": "10.1038/386440a0",
          "note": "Wolynes et al. (1995) - navigating the folding routes"
        },
        {
          "doi": "10.1073/pnas.92.8.3626",
          "note": "Bryngelson et al. (1995) - funnels, pathways, and the energy landscape of protein folding"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-physics/b-protein-folding-funnel-energy-landscape.yaml"
    },
    {
      "id": "b-viral-self-assembly-capsid-physics",
      "title": "Viral capsids self-assemble from identical protein subunits into icosahedral shells whose geometry is fully predicted by Caspar-Klug triangulation theory, and whose thermodynamics and cooperative kinetics are quantitatively described by nucleation- elongation models from polymer physics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Caspar and Klug (1962) showed that icosahedral capsids can be indexed by the triangulation number T = h² + hk + k² (h, k non-negative integers), giving 60T protein subunits per capsid. Most plant viruses have T=3 (180 subunits); adenovirus has T=25 (1500 subunits); bacteriophage HK97 has T=7. This p",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-rna-electrostatic-packaging-signal-design"
      ],
      "communication_gap": "Caspar and Klug published in Cold Spring Harbor Symposia (structural biology), while the nucleation-elongation kinetics literature is primarily in biophysics and soft- matter physics journals. Virologists focused on genetics and replication rarely engage with the quantitative assembly physics literature. Single-molecule biophysicists and structural virologists have begun to converge, but the broader communities remain siloed by journal and conference culture.\n",
      "translation_table": [
        {
          "field_a_term": "triangulation number T = h² + hk + k²",
          "field_b_term": "geometrical tiling index for icosahedral surfaces",
          "note": "Pure geometry predicts subunit count 60T and quasi-equivalence of subunit environments"
        },
        {
          "field_a_term": "critical assembly concentration (CAC)",
          "field_b_term": "critical micelle concentration (CMC) analog",
          "note": "Below CAC, subunits remain as monomers; above CAC, capsids dominate"
        },
        {
          "field_a_term": "nucleation lag phase",
          "field_b_term": "nucleation in crystallization and fibril assembly — identical kinetic form",
          "note": "Zlotnick model parameters map onto classical nucleation theory"
        },
        {
          "field_a_term": "phi29 packaging motor force (>50 pN)",
          "field_b_term": "molecular motor mechanics — F-V curve, stall force",
          "note": "Measured by optical tweezers; surpasses myosin and kinesin force generation"
        },
        {
          "field_a_term": "electrostatic co-assembly (RNA-capsid)",
          "field_b_term": "polyelectrolyte condensation by multivalent cations",
          "note": "Analogous to DNA compaction by histones — charge neutralization drives condensation"
        }
      ],
      "references": [
        {
          "doi": "10.1101/SQB.1962.027.001.008",
          "note": "Caspar & Klug (1962) Cold Spring Harb Symp Quant Biol 27:1 — triangulation theory"
        },
        {
          "doi": "10.1006/jmbi.1994.1318",
          "note": "Zlotnick (1994) J Mol Biol 241:59 — nucleation-elongation capsid assembly model"
        },
        {
          "doi": "10.1038/35099581",
          "note": "Smith et al. (2001) Nature 413:748 — phi29 packaging motor optical tweezers"
        },
        {
          "note": "Hagan (2014) Adv Chem Phys 155:1 — theory of viral capsid assembly"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-physics/b-viral-self-assembly-capsid-physics.yaml"
    },
    {
      "id": "b-wound-healing-cell-migration-chemotaxis",
      "title": "Wound healing requires coordinated cell migration driven by chemotaxis gradients, mapping tissue repair to the Keller-Segel model of biophysical chemotaxis and connecting wound closure dynamics to active matter physics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Cell migration during wound healing follows Keller-Segel-type chemotaxis up gradients of growth factors (EGF, PDGF, VEGF); the collective motion of epithelial sheets at wound edges is described by active matter models where cell polarity, traction forces, and intercellular tension produce a travelin",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-active-matter-wound-closure-optimization"
      ],
      "communication_gap": "Cell biologists study wound healing through biochemical signaling pathways while biophysicists develop active matter and chemotaxis models; the quantitative connection between molecular signaling (growth factor gradients) and physical tissue mechanics (traction forces, collective migration) is rarely made explicit in either community.\n",
      "translation_table": [
        {
          "field_a_term": "growth factor gradient (cell biology)",
          "field_b_term": "chemoattractant field c(x,t) in Keller-Segel model (biophysics)",
          "note": "PDGF and EGF gradients at wound edges drive directed cell migration via chemotaxis"
        },
        {
          "field_a_term": "cell migration velocity (cell biology)",
          "field_b_term": "drift velocity in chemotaxis equation v = chi * grad(c) (biophysics)",
          "note": "Log-sensing chemotaxis (proportional to grad ln c) describes fibroblast migration in wound healing"
        },
        {
          "field_a_term": "epithelial collective migration (cell biology)",
          "field_b_term": "active polar fluid / active matter physics (physics)",
          "note": "Epithelial sheets at wound edges behave as active matter with polar order and topological defects"
        },
        {
          "field_a_term": "wound closure rate (clinical measure)",
          "field_b_term": "traveling wave speed in the Fisher-KPP equation (mathematics)",
          "note": "Wound closure follows a pulled traveling wave with speed 2*sqrt(r*D) where r is proliferation rate"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0022-5193(71)90050-6",
          "note": "Keller & Segel (1971) - model for chemotaxis; original chemotaxis PDE"
        },
        {
          "doi": "10.1016/j.cell.2011.05.014",
          "note": "Trepat et al. (2011) - physical forces during collective cell migration in wound healing"
        },
        {
          "doi": "10.1038/nphys2733",
          "note": "Ladoux & Mège (2017) - mechanobiology of collective cell behaviour"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-physics/b-wound-healing-cell-migration-chemotaxis.yaml"
    },
    {
      "id": "b-animal-cognition-theory-of-mind",
      "title": "Theory of Mind — the ability to attribute mental states (beliefs, desires, intentions) to others — bridges comparative animal cognition and social-cognitive neuroscience, with the false-belief task as the canonical behavioral assay and mPFC-TPJ-STS as the neural substrate, while Dunbar's social brain hypothesis links neocortex size to social group size across primates.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Theory of Mind (ToM) was formalized by Premack & Woodruff (1978) with the question \"do chimpanzees have a theory of mind?\" — a bridge between animal cognition (biology) and mental-state attribution (social cognition). The false-belief task (Wimmer & Perner 1983) became the standard measure: understa",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-tom-implicit-explicit-dissociation"
      ],
      "communication_gap": "Developmental psychologists who study ToM in children rarely interact with comparative primatologists who study it in great apes, or with social neuroscientists who image the mentalizing network. The three communities use different paradigms (verbal false-belief, non-verbal implicit tasks, fMRI) and publish in different journals (Developmental Psychology, Animal Cognition, Social Cognitive and Affective Neuroscience). Dunbar's social brain hypothesis, which provides the evolutionary bridge, is known across all three communities but rarely used to generate cross-disciplinary predictions.\n",
      "translation_table": [
        {
          "field_a_term": "theory of mind (social science / developmental psychology)",
          "field_b_term": "mentalizing network (mPFC, TPJ, STS) function in neuroscience",
          "note": "ToM as cognitive capacity maps onto a specific neural circuit with known lesion effects"
        },
        {
          "field_a_term": "false-belief task performance",
          "field_b_term": "TPJ BOLD activation during belief attribution",
          "note": "TPJ activity specifically tracks false (vs. true) belief attribution"
        },
        {
          "field_a_term": "social group size (primatology)",
          "field_b_term": "neocortex ratio (comparative neuroanatomy)",
          "note": "Dunbar's number: neocortex ratio predicts maximum stable social group size (r = 0.76)"
        },
        {
          "field_a_term": "implicit vs. explicit ToM",
          "field_b_term": "subcortical (amygdala, STS) vs. prefrontal mentalizing routes",
          "note": "Implicit ToM may use a faster subcortical route; explicit ToM requires PFC"
        },
        {
          "field_a_term": "competitive food-hiding (raven experiments)",
          "field_b_term": "agent-based modeling of social strategy evolution",
          "note": "Ravens' ToM-like behavior can be modeled as best-response in a Stackelberg game"
        },
        {
          "field_a_term": "evolutionary pressure (social complexity)",
          "field_b_term": "encephalization quotient across species",
          "note": "Social brain hypothesis predicts EQ from group size; testable across 200+ primate species"
        }
      ],
      "references": [
        {
          "note": "Premack & Woodruff (1978) — Does the chimpanzee have a theory of mind?",
          "doi": "10.1017/S0140525X00076512"
        },
        {
          "note": "Wimmer & Perner (1983) — Beliefs about beliefs (false-belief task)",
          "doi": "10.1016/0010-0277(83)90004-5"
        },
        {
          "note": "Onishi & Baillargeon (2005) — Do 15-month-old infants understand false beliefs?",
          "doi": "10.1126/science.1107621"
        },
        {
          "note": "Dunbar (1992) — Neocortex size as a constraint on group size in primates",
          "doi": "10.1016/0047-2484(92)90081-J"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-social-science/b-animal-cognition-theory-of-mind.yaml"
    },
    {
      "id": "b-behavioral-economics-evolutionary-psychology",
      "title": "Loss aversion, present bias, status quo bias, and the endowment effect — the core anomalies of behavioral economics — have evolutionary adaptations as their mechanistic origin: asymmetric fitness consequences of gains and losses in ancestral environments, encoded in prospect theory's value function V(x) = x^α for gains, -λ(-x)^β for losses (λ ≈ 2.25), and hyperbolic discounting U = u₀ + β Σ δ^t u_t (β < 1).\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Kahneman-Tversky prospect theory (1979) documents systematic violations of expected utility theory: V(x) = x^α for gains (α≈0.88), V(x) = -λ(-x)^β for losses (λ≈2.25, β≈0.88). Loss aversion coefficient λ≈2.25 means losses hurt twice as much as equivalent gains feel good. Quasi-hyperbolic discounting",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-prospect-theory-lambda-fitness-landscape-ancestral-environment"
      ],
      "communication_gap": "Behavioral economics (Kahneman, Tversky, Thaler) developed largely independently of evolutionary psychology (Tooby, Cosmides, Buss), with separate journals (Econometrica vs. Evolution and Human Behavior) and distinct epistemological traditions (economic rationality violations vs. adaptive fitness). Neuroeconomics (Glimcher, Rangel) bridges these but is itself a recent hybrid field. Economic theorists are uncomfortable with adaptationist reasoning; evolutionary psychologists often lack formal economic utility theory training.\n",
      "translation_table": [
        {
          "field_a_term": "loss aversion coefficient λ ≈ 2.25 (prospect theory)",
          "field_b_term": "asymmetric fitness cost of resource loss vs. equivalent gain in ancestral ecology",
          "note": "λ predicts over-weighting of loss; evolution predicts this from variance in fitness consequences"
        },
        {
          "field_a_term": "quasi-hyperbolic discount factor β < 1",
          "field_b_term": "adaptive preference for immediate resource acquisition in uncertain environment",
          "note": "β → 0 = pure present bias; exponential δ-discounting is normative but not adaptive ancestrally"
        },
        {
          "field_a_term": "reference-dependent utility (gains/losses framed vs. neutral level)",
          "field_b_term": "adaptation-level theory in evolutionary psychology — fitness is change, not absolute",
          "note": "Organisms track environmental change not absolute state; neurons encode prediction errors"
        },
        {
          "field_a_term": "ventral striatum activation (fMRI) for gains",
          "field_b_term": "dopamine reward circuit (nucleus accumbens) for resource acquisition",
          "note": "Same circuit: prospect theory gain encoding = dopaminergic reward prediction error"
        },
        {
          "field_a_term": "anterior insula activation for losses and risk",
          "field_b_term": "aversive motivational system for predator/injury/starvation avoidance",
          "note": "Insula encodes interoceptive state; high-stakes losses trigger visceral aversion response"
        }
      ],
      "references": [
        {
          "doi": "10.2307/1914185",
          "note": "Kahneman & Tversky (1979) Prospect theory: an analysis of decision under risk. Econometrica 47:263"
        },
        {
          "doi": "10.1162/003355397555253",
          "note": "Laibson (1997) Golden eggs and hyperbolic discounting. Q J Econ 112:443"
        },
        {
          "note": "Glimcher (2011) Foundations of Neuroeconomics. Academic Press"
        },
        {
          "note": "Buss (2015) Evolutionary Psychology: The New Science of the Mind. Pearson, 5th edn"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-social-science/b-behavioral-economics-evolutionary-psychology.yaml"
    },
    {
      "id": "b-epigenetics-transgenerational-trauma",
      "title": "Epigenetic marks — DNA methylation and histone modifications — can persist across generations without altering DNA sequence, providing a molecular mechanism by which historical trauma (genocide, famine, war) leaves measurable biological signatures in descendants, bridging social history with molecular epigenomics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "contested",
      "bridge_claim": "Epigenetic modifications — primarily CpG methylation of DNA and post-translational modifications of histones (H3K4me3, H3K27me3) — regulate gene expression without altering the underlying DNA sequence. Most of the epigenome is reset during germ cell development (epigenetic reprogramming), but a sub",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-sperm-small-rna-mediates-paternal-trauma-epigenetic-inheritance"
      ],
      "communication_gap": "Molecular biologists studying epigenetic reprogramming publish in Nature, Cell, and Molecular Cell; social scientists studying intergenerational trauma publish in sociology, psychology, and public health journals. The Yehuda group (psychiatry/endocrinology) bridges both but lacks the molecular mechanistic depth of pure epigenetics labs. The contested nature of the field — small samples, confounders, contested replication — makes social scientists cautious about adopting molecular claims, while molecular biologists view the human observational data as underpowered.\n",
      "translation_table": [
        {
          "field_a_term": "DNA methylation at CpG sites (molecular biology)",
          "field_b_term": "Molecular memory of social/environmental exposure (social science)",
          "note": "Methylation state is the biochemical encoding of experiential history"
        },
        {
          "field_a_term": "Glucocorticoid receptor (GR) expression level (endocrinology)",
          "field_b_term": "Stress reactivity and resilience phenotype (psychology/sociology)",
          "note": "GR promoter methylation determines HPA axis sensitivity"
        },
        {
          "field_a_term": "Epigenetic reprogramming at fertilisation (developmental biology)",
          "field_b_term": "Intergenerational biological reset — limits transmission scope",
          "note": "Most methylation marks erased; transmission requires escape from reprogramming"
        },
        {
          "field_a_term": "Imprinted loci (escape epigenetic reprogramming)",
          "field_b_term": "Heritable channels for transgenerational environmental information",
          "note": "Non-imprinted loci with incomplete erasure are candidate transmission vectors"
        },
        {
          "field_a_term": "Sperm small non-coding RNAs (miRNA, piRNA, tRNA fragments)",
          "field_b_term": "Paternal transmission vector for acquired experiential state",
          "note": "Sperm RNA is not erased at fertilisation; transfers to zygote"
        },
        {
          "field_a_term": "FKBP5 methylation at glucocorticoid response element",
          "field_b_term": "Molecular signature of trauma exposure in Holocaust survivor families",
          "note": "Yehuda et al. (2016) — same CpG in survivors and their adult children"
        }
      ],
      "references": [
        {
          "doi": "10.1016/j.biopsych.2015.08.005",
          "note": "Yehuda et al. (2016) Holocaust exposure induced intergenerational effects on FKBP5 methylation, Biol Psychiatry 80:372 — first human epigenetic evidence of transgenerational trauma transmission\n"
        },
        {
          "doi": "10.1146/annurev.neuro.24.1.1161",
          "note": "Meaney (2001) Maternal care, gene expression, and transmission of individual differences in stress reactivity across generations, Annu Rev Neurosci 24:1161\n"
        },
        {
          "doi": "10.1038/nn.3594",
          "note": "Dias & Ressler (2014) Parental olfactory experience influences behavior and neural structure in subsequent generations, Nat Neurosci 17:89 — specific odor sensitivity inherited via sperm epigenome\n"
        },
        {
          "doi": "10.1016/j.cell.2014.04.015",
          "note": "Heard & Martienssen (2014) Transgenerational epigenetic inheritance: myths and mechanisms, Cell 157:95 — critical review of mechanisms and evidence\n"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-social-science/b-epigenetics-transgenerational-trauma.yaml"
    },
    {
      "id": "b-evolutionary-medicine-mismatch",
      "title": "Evolutionary Medicine and Mismatch Theory — thrifty genotype, hygiene hypothesis, myopia epidemic, and circadian disruption as mismatches between Pleistocene adaptations and modern environments",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Evolutionary medicine (Nesse & Williams 1994) analyses disease through the lens of evolutionary history: many chronic diseases are mismatches between evolved adaptations and modern environments that differ radically from the Pleistocene. The thrifty genotype hypothesis (Neel 1962): alleles promoting",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Evolutionary medicine has been developing as a field since Nesse & Williams (1994) but remains marginal in medical education and clinical practice — most medical curricula do not include evolutionary biology. Clinicians trained in proximate mechanisms (how disease works) rarely engage with evolutionary (why) explanations. Public health practitioners who design interventions rarely consult evolutionary biologists. The field also faces scepticism about adaptationist storytelling — the challenge of distinguishing genuine evolutionary mismatch hypotheses from just-so stories requires rigorous cross-cultural and experimental tests.\n",
      "translation_table": [
        {
          "field_a_term": "thrifty genotype (efficient fat storage alleles)",
          "field_b_term": "metabolic syndrome risk alleles in calorie-abundant environments",
          "note": "FTO gene variant is the largest common variant for obesity; likely evolved in energy-scarce ancestral context"
        },
        {
          "field_a_term": "old friends hypothesis (evolved immune regulators)",
          "field_b_term": "immunological mismatch from loss of helminth and microbial exposures",
          "note": "Helminth-derived molecules (LNFPIII) restore Treg function in mouse models; helminth therapy trials in humans ongoing"
        },
        {
          "field_a_term": "dopamine-dependent retinal stop signal (light-driven)",
          "field_b_term": "outdoor light requirement for normal emmetropisation (eye growth regulation)",
          "note": "Time outdoors (>2 h/day, not specific activities) reduces myopia onset risk by ~40%; light intensity, not UV, is the key factor"
        },
        {
          "field_a_term": "social jetlag (chronotype vs. social schedule misalignment)",
          "field_b_term": "chronic circadian disruption from artificial light and work schedules",
          "note": "1 hour of social jetlag increases obesity risk ~33%; shift workers have elevated all-cause mortality"
        },
        {
          "field_a_term": "discordance hypothesis (excess energy density + sedentary lifestyle)",
          "field_b_term": "cardiovascular disease as evolutionary mismatch",
          "note": "Cordain et al.: Paleolithic diet (low glycemic load, high fibre, lean meat) had CV risk profile very different from modern Western diet"
        },
        {
          "field_a_term": "mismatch disease (adaptation in wrong environment)",
          "field_b_term": "chronic non-communicable disease as evolutionary lag",
          "note": "Key prediction: prevalence should correlate with degree of departure from ancestral environment; natural experiments (migration studies) test this"
        }
      ],
      "references": [
        {
          "note": "Nesse & Williams (1994) Why We Get Sick — evolutionary medicine founding text"
        },
        {
          "note": "Neel (1962) Am J Hum Genet 14:353 — thrifty genotype hypothesis"
        },
        {
          "doi": "10.1016/S0140-6736(12)60272-4",
          "note": "Morgan et al. (2012) Lancet 379:1739 — outdoor light and myopia prevention"
        },
        {
          "doi": "10.1136/bmj.299.6710.1259",
          "note": "Strachan (1989) BMJ 299:1259 — hygiene hypothesis (hay fever and family size)"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-social-science/b-evolutionary-medicine-mismatch.yaml"
    },
    {
      "id": "b-quorum-sensing-x-game-theory",
      "title": "Bacterial quorum sensing — collective switching via diffusible signals — is naturally modeled as a multiplayer game with nonlinear payoffs and thresholds, linking microbiology to economics-style strategic interaction.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Cells produce and respond to autoinducers; when signal concentration crosses a threshold, regulons activate (virulence, biofilm formation, competence). Producers pay metabolic costs; cheaters may exploit public goods without producing signal. The interaction resembles threshold public goods games an",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-quorum-thresholds-are-ess-under-stochastic-demography"
      ],
      "communication_gap": "Microbiology papers emphasize pathways; game theory papers emphasize payoffs. Empirical payoff matrices in clinical isolates remain sparse.",
      "translation_table": [
        {
          "field_a_term": "autoinducer concentration",
          "field_b_term": "public signal observable by players"
        },
        {
          "field_a_term": "threshold-regulated genes",
          "field_b_term": "discontinuous strategy switch at critical participation"
        },
        {
          "field_a_term": "cheater mutants",
          "field_b_term": "defectors in public goods games"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.0608255103",
          "note": "Brown & Johnstone (2007) — cooperation and bacterial communication (game-theoretic framing)"
        },
        {
          "doi": "10.1128/MMBR.00099-08",
          "note": "Waters & Bassler (2005) — quorum sensing review (molecular basis)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-social-science/b-quorum-sensing-x-game-theory.yaml"
    },
    {
      "id": "b-sociobiology-kin-selection",
      "title": "Hamilton's rule (rb > c) derives the evolutionary conditions for altruism from population genetics, creating a quantitative bridge between biology and social science through inclusive fitness, the Price equation, and the gene-centered view of selection.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Hamilton's (1964) rule rb > c — altruistic behavior spreads when the benefit b to a recipient weighted by genetic relatedness r exceeds the cost c to the actor — gives social science a quantitative evolutionary foundation for cooperation. Inclusive fitness = personal fitness + Σ rᵢ·bᵢ generalises Da",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-cultural-multilevel-selection-dominates-genetic"
      ],
      "communication_gap": "Sociobiological claims were received with ideological hostility in social science after Wilson's (1975) Sociobiology and the subsequent controversy; many social scientists explicitly rejected biological explanation of social behavior as reductionist. This created a decades-long communication gap that only partially healed through evolutionary psychology (Buss, Pinker) and cultural evolution (Henrich). Biologists and social scientists rarely co-author, attend the same conferences, or read each other's top journals.\n",
      "translation_table": [
        {
          "field_a_term": "genetic relatedness r",
          "field_b_term": "social network tie strength / kinship coefficient",
          "note": "Social scientists use network centrality; Hamilton used Wright's coefficient — both predict cooperation intensity"
        },
        {
          "field_a_term": "inclusive fitness",
          "field_b_term": "total welfare including effects on \"close\" others",
          "note": "Economists' externalities; sociologists' social capital both partially capture this"
        },
        {
          "field_a_term": "eusociality (queen-worker division of labor)",
          "field_b_term": "division of labor in human institutions",
          "note": "Nowak-Tarnita-Wilson analogy between superorganism and corporation is suggestive but contested"
        },
        {
          "field_a_term": "Price equation Δz̄ = Cov(w,z)/w̄ + E(wΔz)/w̄",
          "field_b_term": "partitioning of social change into selection vs. transmission components",
          "note": "Price equation used in cultural evolution (Henrich & McElreath) — applies to memes as well as genes"
        },
        {
          "field_a_term": "cheater detection / reciprocal altruism (Trivers 1971)",
          "field_b_term": "social contract enforcement / legal punishment",
          "note": "Cosmides & Tooby use the Wason selection task to argue cheater-detection is cognitively specialized"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0022-5193(64)90038-4",
          "note": "Hamilton (1964) — The genetical evolution of social behaviour I & II, J Theor Biol 7:1"
        },
        {
          "note": "Dawkins (1976) The Selfish Gene, Oxford University Press"
        },
        {
          "doi": "10.1038/nature09205",
          "note": "Nowak, Tarnita & Wilson (2010) — The evolution of eusociality, Nature 466:1057"
        },
        {
          "doi": "10.1111/j.1420-9101.2010.02110.x",
          "note": "West et al. (2011) — Sixteen common misconceptions about the evolution of cooperation, J Evol Biol 24:1180"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-social-science/b-sociobiology-kin-selection.yaml"
    },
    {
      "id": "b-dna-replication-fork-x-asymmetric-exclusion-traffic-jam",
      "title": "DNA replication advances as polymerases and accessory proteins track the fork while encountering obstacles — totally asymmetric simple exclusion processes (TASEP) on lattices exhibit boundary-induced phase separation and jamming fronts reminiscent of molecular motor queues — existing ribosome–TASEP bridges emphasize translation; this bridge foregrounds replisome traffic constraints on genomic DNA **without claiming literal ASEP universality in vivo**.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Leading- versus lagging-strand synthesis asymmetry and polymerase collisions produce heterogeneous occupancy patterns along DNA reminiscent of driven lattice gases — mathematical toy models (ASEP variants with extended particles, defects, slow bonds) supply intuition for jam accumulation ahead of le",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-stalling-density-wave-speed-correlates-with-seq-measured-pause-density-peaks"
      ],
      "communication_gap": "Replication biochemistry literature emphasizes kinase signaling (ATR/Chk1) while statistical physics emphasizes phase diagrams — integrated stochastic models remain unevenly adopted outside specialist collaborations.\n",
      "translation_table": [
        {
          "field_a_term": "Replication fork progression velocity vs genomic coordinate",
          "field_b_term": "ASEP particle current J versus density ρ phase diagram",
          "note": "Shared jam/facilitation metaphors — biology adds licensing and repair checkpoints."
        },
        {
          "field_a_term": "Replication stress checkpoints slowing fork progression",
          "field_b_term": "Reduced hopping rates at lattice defects halting shock fronts",
          "note": "Regulatory slowing parallels modified hopping-rate heterogeneity."
        },
        {
          "field_a_term": "Rear-ending collisions / polymerase stalling events",
          "field_b_term": "Multi-species exclusion processes with overtaking frustration",
          "note": "Explicit ASEP extensions closer than pure single-species TASEP."
        }
      ],
      "references": [
        {
          "doi": "10.1038/nrm2916",
          "note": "Zeman & Cimprich (2014) — causes and consequences of replication stress"
        },
        {
          "doi": "10.1016/0375-9601(93)90862-8",
          "note": "Derrida et al. (1993) — exact solution of 1D asymmetric exclusion model using matrix ansatz (reference lattice-gas shock phenomenology)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-statistical-physics/b-dna-replication-fork-x-asymmetric-exclusion-traffic-jam.yaml"
    },
    {
      "id": "b-epithelial-jamming-x-colloidal-glass-rheology",
      "title": "Confluent epithelial monolayers exhibit jamming-like solid–fluid transitions in shape, motility, and stress transmission that parallel the disordered jamming and glassy rheology of dense colloids — enabling soft-matter scaling ideas to inform tissue mechanics and disease-related fluidization.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Vertex and Voronoi models predict geometric jamming thresholds where cells lose motility as shape index approaches critical values; experiments on cultured epithelia show rigidity transitions reminiscent of yield stress and cage-breaking in particulate glasses — though active cellular forces, divisi",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-shared-shape-index-scaling-near-jamming-across-donors"
      ],
      "communication_gap": "Soft-matter lectures emphasize jamming phase diagrams for granular disks while developmental biology teaches epithelial sheet mechanics without citing Liu–Nagel-style diagrams — collaborative datasets linking traction microscopy to particle-tracking statistics remain uncommon outside specialized labs.\n",
      "translation_table": [
        {
          "field_a_term": "cell shape index / perimeter fluctuations at confluence",
          "field_b_term": "vanishing free volume / increased coordination in dense packings",
          "note": "Shared jamming vocabulary; biological activity breaks equilibrium glass assumptions."
        },
        {
          "field_a_term": "tissue fluidization under mechanical stress or cytokine stimulation",
          "field_b_term": "yielding of amorphous solids past τ_y (shear jamming)",
          "note": "Analogous stress–strain curves arise but metabolic regulation adds feedback loops."
        },
        {
          "field_a_term": "heterogeneity of cell speeds (dynamic heterogeneity)",
          "field_b_term": "dynamic heterogeneity in glass-forming liquids near T_g",
          "note": "Similar statistical descriptors used across communities when interpreted cautiously."
        }
      ],
      "references": [
        {
          "doi": "10.1038/nmat4357",
          "note": "Park et al. / Fredberg group — epithelial jamming–unjamming linked to asthma-related mechanics (Nature Materials)"
        },
        {
          "doi": "10.1038/nature03768",
          "note": "Liu & Nagel (1998) — jamming paradigm for nonthermal disordered systems"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-statistical-physics/b-epithelial-jamming-x-colloidal-glass-rheology.yaml"
    },
    {
      "id": "b-cryoem-bayesian-x-single-particle-reconstruction",
      "title": "Single-particle cryo-EM reconstructs 3D density maps by aligning noisy particle images whose orientations are latent variables — Bayesian posteriors over maps and alignment parameters (e.g., RELION marginalization) mirror hierarchical inverse problems in statistics where hyperpriors stabilize ill-posed tomographic reconstruction under extreme noise.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Cryo-EM SPA treats each micrograph particle as a noisy projection of an unknown 3D volume V(r); orientation θ is hidden per particle. Algorithms alternate between refining θ estimates and updating V — analogous to expectation-maximization yet modern pipelines incorporate explicit Bayesian priors on ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-cryoem-bayesian-x-single-particle-reconstruction"
      ],
      "communication_gap": "Structural biology training emphasizes hardware and sample prep more than Bayesian hierarchical modeling; statistics departments rarely assign cryo-EM datasets despite pedagogical value for non-asymptotic inverse problems.\n",
      "translation_table": [
        {
          "field_a_term": "Latent orientation θ per particle (cryo-EM)",
          "field_b_term": "Missing nuisance parameters in hierarchical Bayesian models",
          "note": "Both require marginalization or optimization over large latent spaces."
        },
        {
          "field_a_term": "3D volume V(r) reconstruction",
          "field_b_term": "Unknown high-dimensional parameter with tomographic forward operator A_θ",
          "note": "Linearized forward models resemble Radon-like operators under weak-phase object approximations."
        },
        {
          "field_a_term": "Gold-standard FSC resolution metrics",
          "field_b_term": "Posterior predictive checks / cross-validation splits in Bayesian inverse problems",
          "note": "Both assess reliability rather than point estimates alone."
        }
      ],
      "references": [
        {
          "doi": "10.1038/nmeth.1907",
          "note": "Scheres (2012) Nature Methods — Bayesian approach for cryo-EM structure determination with RELION"
        },
        {
          "doi": "10.1073/pnas.0305949101",
          "note": "Frank (2004) PNAS perspective — single-particle reconstruction trajectory"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-statistics/b-cryoem-bayesian-x-single-particle-reconstruction.yaml"
    },
    {
      "id": "b-lasso-sparsity-x-biomarker-panel-design",
      "title": "Lasso sparsity priors link statistical model selection to practical biomarker panel design.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Lasso path sparsification can be interpreted as an assay-budget-aware strategy for selecting compact biomarker panels.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-stability-selected-lasso-panels-outperform-fixed-biomarkers-under-assay-noise"
      ],
      "communication_gap": "Biomarker studies often optimize biological interpretability, while statistical workflows optimize predictive sparsity without deployment constraints.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1111/j.2517-6161.1996.tb02080.x",
          "note": "Lasso regression and shrinkage selection."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/biology-statistics/b-lasso-sparsity-x-biomarker-panel-design.yaml"
    },
    {
      "id": "b-microplate-absorbance-x-inverse-beer-lambert-calibration",
      "title": "96-well microplate photometry inverts measured absorbance (or fluorescence intensity) to analyte concentration using Beer–Lambert linearity or calibration curves — a practical inverse problem whose conditioning, cross-talk, and batch effects parallel instrument-calibration theory in metrology and chemometrics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "For monochromatic light and dilute solutions, absorbance A = ε c l links concentration c to transmission; microplate readers estimate c from A using standard curves, sometimes with linear mixed models for batch/plate effects. The mathematical object is an ill-conditioned linear inverse when multiple",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-multi-wavelength-beer-lambert-inverse-improves-plate-precision"
      ],
      "communication_gap": "Biochemistry protocols emphasize standard curves and vendor software defaults while analytical chemistry emphasizes uncertainty budgets and propagation — replicate assay scientists rarely publish full inverse-problem conditioning diagnostics per plate.\n",
      "translation_table": [
        {
          "field_a_term": "Beer–Lambert optical depth ε c l",
          "field_b_term": "expected absorbance readout per well",
          "note": "Linear regime assumes independent absorbers and negligible scattering."
        },
        {
          "field_a_term": "multi-wavelength absorbance vector per well",
          "field_b_term": "linear mixing model solved by least squares / ridge for concentrations",
          "note": "Mirrors spectroscopic unmixing and inverse concentration retrieval with positivity constraints."
        },
        {
          "field_a_term": "plate batch random effects (edge effects, evaporation)",
          "field_b_term": "hierarchical calibration offsets in metrology inter-laboratory studies",
          "note": "Shared statistical structure — mixed models — even though physics differs."
        }
      ],
      "references": [
        {
          "doi": "10.1515/iupac.68.3549",
          "note": "IUPAC Compendium entry — Beer–Lambert(-Bouguer) law foundations for absorption-based concentration inference."
        },
        {
          "doi": "10.1016/S0140-6736(75)91492-0",
          "note": "Voller et al. (1975) — microplate enzyme-linked immunosorbent assay foundation paper tying plate photometry to quantitative immuno readouts (Lancet)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-statistics/b-microplate-absorbance-x-inverse-beer-lambert-calibration.yaml"
    },
    {
      "id": "b-phylogenetic-comparative-pgls",
      "title": "Phylogenetic generalised least squares (PGLS) corrects for the non- independence of closely related species by modelling trait covariance as proportional to shared branch length on the phylogenetic tree, bridging evolutionary biology to multivariate statistics through the variance- covariance structure of trait evolution under Brownian motion.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "PROBLEM: Closely related species share evolutionary history — a regression of body mass on metabolic rate across 100 mammal species treats data as 100 independent observations, but phylogenetic correlation inflates type I error rates dramatically.\nPGLS SOLUTION (Grafen 1989, Freckleton 2002): Model ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-phylogenetic-signal-metabolic-rate-conservation"
      ],
      "communication_gap": "Evolutionary biologists learn phylogenetic methods as a specialist toolkit (PGLS, ancestral state reconstruction) without recognising they are GLS with a structured covariance. Statisticians working on spatial statistics and covariance-structured regression rarely engage with the phylogenetics literature where these methods were independently developed.\n",
      "translation_table": [
        {
          "field_a_term": "phylogenetic covariance matrix V (evolutionary biology)",
          "field_b_term": "variance-covariance matrix in GLS (statistics)",
          "note": "V encodes the expected covariance between species under Brownian motion on the tree"
        },
        {
          "field_a_term": "Brownian motion model of trait evolution",
          "field_b_term": "random walk / Gaussian process prior on trait values",
          "note": "Brownian motion is equivalent to a Gaussian process with linear covariance kernel = time to MRCA"
        },
        {
          "field_a_term": "Blomberg K (phylogenetic signal)",
          "field_b_term": "intraclass correlation coefficient",
          "note": "K measures the proportion of trait variance explained by phylogenetic relatedness — analogous to ICC in mixed models"
        },
        {
          "field_a_term": "Pagel lambda transformation",
          "field_b_term": "variance components estimation in mixed models",
          "note": "Lambda is estimated by REML/MLE — equivalent to estimating the random effects variance component in a linear mixed model"
        }
      ],
      "references": [
        {
          "doi": "10.1086/284325",
          "note": "Felsenstein (1985) Am Nat 125:1 — phylogenetically independent contrasts"
        },
        {
          "doi": "10.1098/rstb.1989.0106",
          "note": "Grafen (1989) Phil Trans R Soc B 326:119 — PGLS original derivation"
        },
        {
          "doi": "10.1554/0014-3820(2003)057[0717:TPAGSM]2.0.CO;2",
          "note": "Blomberg et al. (2003) Evolution 57:717 — K statistic"
        },
        {
          "doi": "10.1086/303577",
          "note": "Freckleton et al. (2002) Am Nat 160:712 — Pagel lambda estimation"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biology-statistics/b-phylogenetic-comparative-pgls.yaml"
    },
    {
      "id": "b-phylogeography-coalescent-molecular-clock",
      "title": "Phylogeography uses the coalescent theory from population genetics as a backward- time statistical model to date past population splits and migrations from present-day DNA sequences, with the molecular clock assumption providing the rate calibration that transforms branch lengths in mutations per site into years — making evolutionary biology a direct application of stochastic process theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The coalescent (Kingman 1982) describes how a sample of gene copies traces back to a common ancestor, with coalescence events occurring at rate C(k,2)/N_e for k gene copies in a population of effective size N_e. This backward-time Markov chain provides a probabilistic model for the genealogy of any ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "",
      "translation_table": [
        {
          "field_a_term": "coalescent rate C(k,2)/N_e",
          "field_b_term": "probability of two gene copies sharing a common ancestor in this generation",
          "note": "Sets the timescale for genealogical merging — inversely proportional to population size"
        },
        {
          "field_a_term": "molecular clock mu (mutations per site per year)",
          "field_b_term": "rate calibration for converting genetic distance to time",
          "note": "Estimated from fossil-calibrated internal nodes or known external events"
        },
        {
          "field_a_term": "MCMC posterior over demographic history",
          "field_b_term": "Bayesian reconstruction of ancestral population size changes",
          "note": "Bayesian skyline plot (Drummond et al.) — N_e(t) inferred from sequence data"
        },
        {
          "field_a_term": "effective population size N_e",
          "field_b_term": "genetically inferred population size (may differ from census size)",
          "note": "N_e integrates over variance in reproductive success and demographic fluctuations"
        }
      ],
      "references": [
        {
          "doi": "10.1093/genetics/105.3.767",
          "note": "Kingman (1982) — the coalescent; foundational paper for genealogical stochastic process theory"
        },
        {
          "doi": "10.1093/molbev/msp195",
          "note": "Drummond et al. (2005) — Bayesian coalescent inference of past population dynamics from molecular sequences"
        },
        {
          "doi": "10.1371/journal.pcbi.1006370",
          "note": "Bouckaert et al. (2019) — BEAST 2.5; phylogenetic inference using evolutionary models"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biology-statistics/b-phylogeography-coalescent-molecular-clock.yaml"
    },
    {
      "id": "b-random-matrix-denoising-x-single-cell-covariance-cleaning",
      "title": "Random matrix denoising maps finance-style covariance cleaning to single-cell expression structure recovery.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Marchenko-Pastur spectral filtering used for noisy financial covariances can denoise high-dimensional single-cell expression covariances before downstream manifold steps.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-rmt-covariance-cleaning-improves-single-cell-state-clustering"
      ],
      "communication_gap": "Communities use different terminology and validation conventions, masking transferable method equivalence.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1007/BF01015918",
          "note": "Marchenko-Pastur eigenvalue distribution."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/biology-statistics/b-random-matrix-denoising-x-single-cell-covariance-cleaning.yaml"
    },
    {
      "id": "b-molecular-motors-thermodynamic-efficiency",
      "title": "Biological molecular motors (myosin, kinesin, ATP synthase) convert chemical free energy to mechanical work at 25-40% efficiency near the Carnot limit, verified by the Jarzynski equality connecting non-equilibrium work to equilibrium free energy, establishing single-molecule thermodynamics as a bridge between biophysics and mechanical engineering.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Molecular motors in living cells are nanoscale machines that perform mechanical work by converting chemical energy (ATP hydrolysis), operating near the thermodynamic efficiency limits derived from macroscopic engineering thermodynamics.\n1. Molecular motor energetics. ATP hydrolysis: ATP → ADP + Pᵢ r",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-molecular-motor-near-equilibrium-operation"
      ],
      "communication_gap": "Mechanical engineers studying thermodynamic efficiency of macroscopic motors and biophysicists studying molecular motors are separated by six orders of magnitude in length scale and by disciplinary boundaries. The Jarzynski equality bridges non-equilibrium statistical physics to single-molecule biophysics, but is rarely taught in engineering thermodynamics courses. Toyabe et al. (2010) demonstrated an information-to-energy conversion machine at the single-molecule scale, realizing Maxwell's demon experimentally.\n",
      "translation_table": [
        {
          "field_a_term": "thermal efficiency η = W/Q_H (heat engine / engineering)",
          "field_b_term": "mechanochemical coupling efficiency W/ΔG_ATP (molecular motor)",
          "note": "Both are ratios of useful work output to energy input from different sources"
        },
        {
          "field_a_term": "Carnot limit η_C = 1 - T_C/T_H (thermodynamics)",
          "field_b_term": "isothermal efficiency limit W/ΔG ≤ 1 (molecular motor)",
          "note": "Molecular motors operate isothermally; Carnot limit is for heat engines"
        },
        {
          "field_a_term": "work W (macroscopic mechanics)",
          "field_b_term": "mechanical step work (force × displacement in nm·pN) per ATP",
          "note": "Same concept; 1 pN·nm = 10⁻²¹ J is the molecular scale unit"
        },
        {
          "field_a_term": "free energy ΔF (thermodynamics)",
          "field_b_term": "ΔG_ATP = ΔG° + kT ln([ADP][Pᵢ]/[ATP]) (biochemistry)",
          "note": "Both are the maximum useful work extractable from a process"
        },
        {
          "field_a_term": "fluctuation-dissipation theorem (statistical physics)",
          "field_b_term": "Jarzynski equality ⟨e^{-W/kT}⟩ = e^{-ΔF/kT} (non-equilibrium)",
          "note": "Jarzynski generalizes fluctuation-dissipation to non-equilibrium processes"
        },
        {
          "field_a_term": "efficiency at maximum power (Curzon-Ahlborn, engineering)",
          "field_b_term": "optimal ATP hydrolysis rate vs. force-velocity curve (biophysics)",
          "note": "Both systems face tradeoff between efficiency and power output"
        }
      ],
      "references": [
        {
          "url": "https://www.sinauer.com/media/wysiwyg/tocs/MechanicsOfMotorProteins.pdf",
          "note": "Howard (2001) Mechanics of Motor Proteins and the Cytoskeleton — Sinauer Associates"
        },
        {
          "doi": "10.1103/PhysRevLett.78.2690",
          "note": "Jarzynski (1997) Phys Rev Lett 78:2690 — nonequilibrium equality for free energy differences"
        },
        {
          "doi": "10.1038/nchembio0705-130",
          "note": "Bustamante et al. (2005) Nat Chem Biol 1:130 — mechanical design of translocating motor proteins"
        },
        {
          "doi": "10.1038/nphys1821",
          "note": "Toyabe et al. (2010) Nat Phys 6:988 — experimental demonstration of information-to-energy conversion"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/biophysics-engineering/b-molecular-motors-thermodynamic-efficiency.yaml"
    },
    {
      "id": "b-stochastic-resonance-biosignaling-x-information-detection",
      "title": "Stochastic resonance in nonlinear biochemical sensors links noise-assisted threshold crossing to information-detection gains in weak biological signaling.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "In excitable and threshold-like cellular pathways, moderate noise can increase detectability of weak periodic inputs by synchronizing barrier crossings with subthreshold stimuli. This maps directly to signal detection language: noise is not only nuisance, but can improve effective sensitivity in non",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-stochastic-resonance-matches-information-peak-in-cell-signaling"
      ],
      "communication_gap": "Biophysics studies often report resonance peaks without information metrics, while information-theory analyses abstract away molecular implementation constraints.\n",
      "translation_table": [
        {
          "field_a_term": "Kramers escape rate tuning",
          "field_b_term": "hit-rate/false-alarm tradeoff in weak-signal detection",
          "note": "Optimal noise maximizes response coherence before false alarms dominate."
        },
        {
          "field_a_term": "bistable potential landscape",
          "field_b_term": "binary communication channel with state-dependent transition probability",
          "note": "Barrier heights define operational signal bandwidth."
        },
        {
          "field_a_term": "coherence resonance",
          "field_b_term": "temporal coding reliability under intrinsic noise",
          "note": "Internal fluctuations can sharpen timing statistics near critical excitability."
        }
      ],
      "references": [
        {
          "doi": "10.1038/365337a0",
          "note": "Moss et al. (1993), early synthesis of stochastic resonance in biology."
        },
        {
          "doi": "10.1103/RevModPhys.70.223",
          "note": "Gammaitoni et al. (1998), comprehensive review of stochastic resonance theory and experiments."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/biophysics-information-theory/b-stochastic-resonance-biosignaling-x-information-detection.yaml"
    },
    {
      "id": "b-mitochondrial-membrane-potential-pmf",
      "title": "Mitochondrial membrane potential is the biophysical embodiment of the proton-motive force: the electrochemical gradient of protons across the inner mitochondrial membrane stores free energy exactly as a thermodynamic battery, quantified by the Mitchell equation Delta_p = Delta_psi - (2.303 RT/F) Delta_pH.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Peter Mitchell's chemiosmotic hypothesis formalises the inner mitochondrial membrane as a proton-impermeable capacitor. The proton-motive force Delta_p (mV) = Delta_psi - 59 Delta_pH at 37°C drives ATP synthase rotation via the FoF1 complex, converting electrochemical free energy into chemical bond ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Cell biologists learn chemiosmosis as a narrative mechanism; thermodynamicists rarely study membrane proteins. The quantitative connection between Delta_p and Gibbs free energy, Carnot-like efficiency bounds, and non-equilibrium steady-state flux analysis is rarely taught across both disciplines.\n",
      "translation_table": [
        {
          "field_a_term": "proton-motive force Delta_p (thermodynamics)",
          "field_b_term": "mitochondrial membrane potential + pH gradient (biophysics)",
          "note": "Delta_p = Delta_psi - (2.303 RT/F) Delta_pH; both terms measurable by fluorescent probes"
        },
        {
          "field_a_term": "electrochemical potential difference Delta_mu_H+ (thermodynamics)",
          "field_b_term": "driving force for ATP synthase rotation (biophysics)",
          "note": "Each proton translocated through Fo releases Delta_mu_H+ = F * Delta_p of free energy"
        },
        {
          "field_a_term": "free-energy transducer efficiency (thermodynamics)",
          "field_b_term": "P/O ratio (ATP produced per oxygen consumed) (biophysics)",
          "note": "P/O ~ 2.5-2.7 in vivo; theoretical maximum set by Delta_G_ATP / (n * F * Delta_p)"
        },
        {
          "field_a_term": "leak current / dissipation (thermodynamics)",
          "field_b_term": "proton leak across inner membrane via uncoupling proteins (biophysics)",
          "note": "UCPs short-circuit Delta_p; thermogenic in brown adipose tissue (non-shivering thermogenesis)"
        }
      ],
      "references": [
        {
          "doi": "10.1038/191144a0",
          "note": "Mitchell (1961) - chemiosmotic coupling in oxidative and photosynthetic phosphorylation"
        },
        {
          "doi": "10.1146/annurev.biochem.76.060806.091607",
          "note": "Boyer (1997) - ATP synthase - past and future"
        },
        {
          "doi": "10.1126/science.1155709",
          "note": "Nicholls & Ferguson (2013) - Bioenergetics 4; quantitative treatment of Delta_p"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biophysics-thermodynamics/b-mitochondrial-membrane-potential-pmf.yaml"
    },
    {
      "id": "b-bayesian-dropout-x-adaptive-trial-stopping",
      "title": "Bayesian dropout uncertainty bridges approximate posterior inference and adaptive clinical-trial stopping decisions.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy (to be empirically validated): Monte Carlo dropout predictive uncertainty can inform adaptive stopping boundaries similarly to posterior predictive criteria in Bayesian trial monitoring.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-bayesian-dropout-uncertainty-improves-adaptive-trial-decisions"
      ],
      "communication_gap": "Clinical trialists rely on interpretable posterior decision rules, while deep-learning uncertainty estimates are often weakly calibrated for regulatory contexts.",
      "translation_table": [
        {
          "field_a_term": "dropout posterior samples",
          "field_b_term": "predictive probability draws",
          "note": "Both estimate uncertainty in treatment-effect predictions."
        },
        {
          "field_a_term": "uncertainty thresholding",
          "field_b_term": "stopping boundary rule",
          "note": "Decision thresholds determine continuation versus stopping actions."
        },
        {
          "field_a_term": "epistemic uncertainty",
          "field_b_term": "information value of additional enrollment",
          "note": "High uncertainty motivates continued recruitment."
        }
      ],
      "references": [
        {
          "arxiv": "1506.02142",
          "note": "Dropout as a Bayesian Approximation."
        },
        {
          "url": "https://www.fda.gov/regulatory-information/search-fda-guidance-documents/adaptive-design-clinical-trials-drugs-and-biologics-guidance-industry",
          "note": "FDA adaptive trial design guidance."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/biostatistics-machine-learning/b-bayesian-dropout-x-adaptive-trial-stopping.yaml"
    },
    {
      "id": "b-microbial-fuel-cells-bioelectrochemistry",
      "title": "Microbial fuel cells exploit extracellular electron transfer by electrogenic bacteria to convert chemical energy directly to electrical current, mapping metabolic oxidation half-reactions onto electrochemical cell theory with the Nernst equation governing thermodynamic limits and biofilm conductivity replacing metallic electrode kinetics",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Electrogenic bacteria such as Geobacter and Shewanella transfer electrons from intracellular NADH oxidation to an external anode via cytochrome c chains or nanowire pili, obeying the same Butler-Volmer kinetics and Nernst thermodynamics that govern conventional electrochemical cells; the open-circui",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-microbial-fuel-cell-anodic-electron-transfer"
      ],
      "communication_gap": "Microbiologists characterize extracellular electron transfer biochemically while electrochemists quantify it through impedance spectroscopy and cyclic voltammetry; shared quantitative frameworks emerged only in the 2000s via bioelectrochemistry, and maximum power density predictions remain inconsistent across the two communities.",
      "translation_table": [
        {
          "field_a_term": "metabolic electron donor (NADH, acetate) in bioelectrochemistry",
          "field_b_term": "anode oxidation half-reaction in electrochemistry",
          "note": "Both set the lower bound of the cell potential via the Nernst equation"
        },
        {
          "field_a_term": "cytochrome c / nanowire electron conduit (biotechnology)",
          "field_b_term": "electrode kinetics / Butler-Volmer exchange current (electrochemistry)",
          "note": "Biological electron-transfer chains play the role of electrode kinetics; overpotential arises from both"
        },
        {
          "field_a_term": "biofilm thickness and conductivity (biotechnology)",
          "field_b_term": "electrode surface area and specific capacitance (electrochemistry)",
          "note": "Dense conductive biofilms increase effective electrode area and charge-transfer rates"
        },
        {
          "field_a_term": "coulombic efficiency of bacterial metabolism (biotechnology)",
          "field_b_term": "Faradaic efficiency of electrochemical cell (electrochemistry)",
          "note": "Fraction of substrate electrons recovered as current; both limited by side reactions and mass transport"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1139358",
          "note": "Lovley (2006) - bug juice: harvesting electricity with microorganisms"
        },
        {
          "doi": "10.1038/nrmicro2397",
          "note": "Lovley (2011) - live wires: direct extracellular electron exchange for bioenergy"
        },
        {
          "doi": "10.1002/anie.200600993",
          "note": "Rabaey & Verstraete (2005) - microbial fuel cells: novel biotechnology for energy generation"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/biotechnology-electrochemistry/b-microbial-fuel-cells-bioelectrochemistry.yaml"
    },
    {
      "id": "b-plant-tropisms-auxin-reaction-diffusion",
      "title": "Plant tropic responses (phototropism, gravitropism, thigmotropism) are driven by lateral auxin gradients that emerge from an activator-inhibitor reaction-diffusion mechanism identical in mathematical structure to Turing's morphogenetic model, with PIN-mediated polar auxin transport playing the role of the fast-diffusing inhibitor",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Lateral redistribution of the phytohormone auxin (IAA) during gravitropism follows a Turing-class reaction-diffusion system: auxin acts as a slowly diffusing activator of its own polar transport while activating inhibitory efflux carrier PIN proteins that relocalize to the lower flank, creating a st",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-auxin-turing-pattern-shoot-branching"
      ],
      "communication_gap": "Plant biologists studying tropism focus on molecular components (phototropins, amyloplasts, PIN trafficking) while mathematicians studying pattern formation work with abstract PDEs; the Turing interpretation of auxin patterning in phyllotaxis is established but its extension to tropism signaling is not universally accepted in the plant biology community.",
      "translation_table": [
        {
          "field_a_term": "auxin (IAA) concentration gradient across organ (botany)",
          "field_b_term": "activator concentration field u(x,t) in reaction-diffusion system (mathematics)",
          "note": "Auxin diffuses slowly and autocatalytically through PIN-mediated transport; fits the slow-activator role"
        },
        {
          "field_a_term": "PIN auxin efflux carrier polar localization (botany)",
          "field_b_term": "fast-diffusing inhibitor v(x,t) in Turing model (mathematics)",
          "note": "PIN protein relocalization acts as the inhibitory feedback that prevents auxin runaway and sets gradient position"
        },
        {
          "field_a_term": "differential cell elongation on high-auxin flank (botany)",
          "field_b_term": "pattern-driven morphogenetic output downstream of Turing instability (mathematics)",
          "note": "Cell elongation is the readout of the auxin pattern, analogous to pigmentation in Turing's original proposal"
        },
        {
          "field_a_term": "Cholodny-Went auxin asymmetry ratio (botany)",
          "field_b_term": "amplitude of steady-state Turing pattern (mathematics)",
          "note": "The log of the auxin ratio between flanks governs curvature, equivalent to pattern amplitude driving morphogenesis"
        }
      ],
      "references": [
        {
          "doi": "10.1016/j.cub.2010.09.026",
          "note": "Friml et al. (2010) - intracellular trafficking and proteolysis of the Arabidopsis PIN-FORMED auxin efflux carrier"
        },
        {
          "doi": "10.1126/science.1121248",
          "note": "Jonsson et al. (2006) - an auxin-driven polarized transport model for phyllotaxis (Turing-class PIN model)"
        },
        {
          "doi": "10.1098/rstb.2000.0618",
          "note": "Friml (2003) - auxin transport - shaping the plant (PIN-mediated polar transport review)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/botany-mathematics/b-plant-tropisms-auxin-reaction-diffusion.yaml"
    },
    {
      "id": "b-stomatal-regulation-game-theory",
      "title": "Stomatal aperture regulation solves an optimal control problem: maximise carbon assimilation per unit water lost while operating under uncertain atmospheric conditions — a dynamic optimisation identical in structure to the Lagrangian dual formulation in economics, making plant physiology a natural laboratory for testing optimal resource allocation theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Stomata regulate CO2 uptake and water vapor efflux through guard cell movements. A leaf faces a fundamental trade-off: open stomata maximise photosynthesis but lose water; closed stomata conserve water but starve carbon fixation. Cowan & Farquhar (1977) showed that the optimal stomatal policy maximi",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "",
      "translation_table": [
        {
          "field_a_term": "constrained optimisation max[A - lambda*E]",
          "field_b_term": "stomatal optimal control (Cowan-Farquhar theory)",
          "note": "Maximise net carbon minus water cost; lambda is shadow price of water"
        },
        {
          "field_a_term": "shadow price lambda",
          "field_b_term": "marginal water cost of carbon (isohydric vs. anisohydric strategy)",
          "note": "Constant lambda => isohydric (ABA-regulated); variable lambda => anisohydric"
        },
        {
          "field_a_term": "Nash equilibrium",
          "field_b_term": "evolutionarily stable stomatal strategy in plant community",
          "note": "Each plant's optimal strategy given competitors' water use — drives a market for water"
        },
        {
          "field_a_term": "Lagrangian duality",
          "field_b_term": "trade-off between carbon gain and water loss",
          "note": "Identical mathematical structure — the water budget is the binding constraint"
        }
      ],
      "references": [
        {
          "doi": "10.1071/PP9770067",
          "note": "Cowan & Farquhar (1977) — stomatal function in relation to leaf metabolism and environment"
        },
        {
          "doi": "10.1104/pp.110.160952",
          "note": "Medlyn et al. (2011) — reconciling the optimal and empirical approaches to stomatal modelling"
        },
        {
          "doi": "10.1111/j.1365-2435.2011.01897.x",
          "note": "Buckley (2017) — optimal leaf stomatal gradients"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/botany-mathematics/b-stomatal-regulation-game-theory.yaml"
    },
    {
      "id": "b-actin-polymerization-treadmilling-nonequilibrium-kinetics",
      "title": "Actin filament treadmilling — simultaneous polymerization at the barbed end and depolymerization at the pointed end — is a non-equilibrium steady state maintained by ATP hydrolysis that bridges cell biology and non-equilibrium thermodynamics: the persistent directional flux requires constant energy input and violates detailed balance, making it a paradigmatic example of a biological Brownian ratchet.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "At steady-state treadmilling, the barbed end grows (k+_b·[G-actin] > k-_b) while the pointed end shrinks (k-_p > k+_p·[G-actin]). The critical concentration c_c = (k-_b·k+_p - k-_p·k+_b) / (k+_b·k+_p - k-_b·k-_p) sets the unique [G-actin] where net filament length is zero but ends are active. Treadm",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-actin-network-topology-determines-cell-motility-directionality"
      ],
      "communication_gap": "Cell biologists studying actin dynamics and biophysicists studying non-equilibrium statistical mechanics share the treadmilling framework, but quantitative application of fluctuation theorems and entropy production calculations to intracellular actin networks remains limited; most cell biology textbooks present treadmilling phenomenologically without connecting it to the non-equilibrium thermodynamic theory.\n",
      "translation_table": [
        {
          "field_a_term": "actin treadmilling (cell biology)",
          "field_b_term": "non-equilibrium steady-state flux violating detailed balance (biophysics)",
          "note": "Treadmilling is sustained by ATP free energy; without hydrolysis it equilibrates"
        },
        {
          "field_a_term": "barbed vs pointed end critical concentrations (cell biology)",
          "field_b_term": "asymmetric boundary conditions of a driven diffusion system (biophysics)",
          "note": "The different c_c values at each end are the thermodynamic signature of the non-equilibrium drive"
        },
        {
          "field_a_term": "ATP hydrolysis by actin (cell biology)",
          "field_b_term": "free energy input maintaining non-equilibrium flux (biophysics)",
          "note": "Delta-mu_ATP = ~50 kJ/mol drives the directional polymerization cycle"
        },
        {
          "field_a_term": "protrusive force of polymerizing actin (cell biology)",
          "field_b_term": "Brownian ratchet rectifying thermal fluctuations (biophysics)",
          "note": "Growing actin filament pushes a membrane by preventing backwards Brownian steps"
        }
      ],
      "references": [
        {
          "doi": "10.1146/annurev.bi.50.070181.001215",
          "note": "Wegner (1976, reviewed by Carlier 1998) - actin treadmilling mechanism"
        },
        {
          "doi": "10.1073/pnas.0501566102",
          "note": "Vavylonis et al. (2005) - actin polymerization kinetics and treadmilling ATP hydrolysis model"
        },
        {
          "doi": "10.1016/j.bpj.2013.06.032",
          "note": "Pollard & Borisy (2003) - cellular motility driven by assembly and disassembly of actin filaments"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cell-biology-biophysics/b-actin-polymerization-treadmilling-nonequilibrium-kinetics.yaml"
    },
    {
      "id": "b-chromatin-remodeling-epigenetic-landscape",
      "title": "Chromatin remodeling defines the epigenetic landscape as a biophysical energy surface where nucleosome positions are attractors and ATP-dependent remodeling complexes act as thermal fluctuation amplifiers that enable transitions between chromatin states — making Waddington's epigenetic landscape a quantitative free-energy landscape in the nucleosome positioning problem.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Waddington (1957) used the metaphor of a ball rolling down a landscape of valleys (cell fates) to describe development. Chromatin biophysics makes this literal: nucleosome positioning along DNA creates a free-energy landscape where positioned nucleosomes are free-energy minima, and chromatin remodel",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "",
      "translation_table": [
        {
          "field_a_term": "free-energy landscape F(position)",
          "field_b_term": "Waddington epigenetic landscape (cell fate potential)",
          "note": "Waddington's metaphorical landscape is literally the free energy of chromatin states"
        },
        {
          "field_a_term": "Boltzmann distribution of nucleosome positions",
          "field_b_term": "nucleosome occupancy probability along DNA",
          "note": "MNase-seq and ATAC-seq measure this distribution empirically"
        },
        {
          "field_a_term": "ATP-dependent remodeling",
          "field_b_term": "non-equilibrium fluctuation amplification enabling state transitions",
          "note": "Remodelers do work against the free-energy gradient to enable gene regulatory transitions"
        },
        {
          "field_a_term": "free-energy barrier between chromatin states",
          "field_b_term": "epigenetic stability (robustness of cell identity)",
          "note": "High barriers = stable differentiated state; low barriers = plastic, reprogrammable"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature14248",
          "note": "Buenrostro et al. (2015) — ATAC-seq; the open chromatin landscape"
        },
        {
          "doi": "10.1126/science.1229223",
          "note": "Klemm et al. (2019) — chromatin accessibility and the regulatory epigenome"
        },
        {
          "doi": "10.1016/j.cell.2009.09.010",
          "note": "Zhang et al. (2009) — nucleosome positioning rules and implications for genome architecture"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cell-biology-biophysics/b-chromatin-remodeling-epigenetic-landscape.yaml"
    },
    {
      "id": "b-nuclear-pore-brownian-ratchet",
      "title": "Nuclear pore complex selective transport implements a Brownian ratchet mechanism where intrinsically disordered FG-nucleoporins create a fluctuating free-energy barrier that is directionally biased by RanGTP hydrolysis — the same physical principle that underlies kinesin stepping and other cytoskeletal molecular motors.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The nuclear pore complex (NPC) must transport hundreds of macromolecules per second while maintaining selectivity against non-specific cargo. Biophysics provides the mechanism: the ~50 nm channel is filled with intrinsically disordered FG-repeat nucleoporins that undergo rapid conformational fluctua",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "",
      "translation_table": [
        {
          "field_a_term": "Brownian ratchet potential",
          "field_b_term": "FG-nucleoporin interaction energy landscape for importin-cargo",
          "note": "The asymmetric free-energy potential that converts thermal fluctuations into directed transport"
        },
        {
          "field_a_term": "power stroke / rectification",
          "field_b_term": "RanGTP hydrolysis releasing importin-cargo in nucleus",
          "note": "The energy input that prevents backward flux and sets the directionality"
        },
        {
          "field_a_term": "diffusion coefficient D",
          "field_b_term": "importin diffusion rate through FG meshwork",
          "note": "Sets the speed of transport and scales with cargo size as D ~ 1/r (Stokes-Einstein)"
        },
        {
          "field_a_term": "selectivity filter",
          "field_b_term": "FG-repeat hydrophobic interaction threshold",
          "note": "Cargo without NLS cannot make sufficient FG contacts to transiently lower the barrier"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature07950",
          "note": "Lim et al. (2007) — molecular model of selective transport through the nuclear pore"
        },
        {
          "doi": "10.1016/j.cell.2010.11.002",
          "note": "Mincer & Simon (2011) — Brownian ratchet model for NPC-mediated transport"
        },
        {
          "doi": "10.1146/annurev.biophys.37.032807.125817",
          "note": "Terry & Wente (2009) — flexible gates in NPC transport"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cell-biology-biophysics/b-nuclear-pore-brownian-ratchet.yaml"
    },
    {
      "id": "b-protein-ubiquitination-proteostasis-network",
      "title": "Protein ubiquitination cascades (E1-E2-E3 hierarchies) constitute a post-translational regulatory network whose topology determines proteostasis capacity: the systems-level flux balance between ubiquitin ligase activity and proteasome degradation controls whether misfolded proteins accumulate or are cleared, with implications for aging and neurodegeneration\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Ubiquitination operates as a hierarchical enzymatic cascade (E1 ubiquitin-activating → E2 conjugating → E3 ligase substrate-specific) that attaches polyubiquitin chains to target proteins for 26S proteasome degradation; systems biology modeling of this network reveals proteostasis collapse as a bist",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-proteasome-saturation-bistability-neurodegeneration"
      ],
      "communication_gap": "Cell biologists characterize ubiquitination substrates and E3 ligase specificities while systems biologists build network models of signal transduction; quantitative models of ubiquitin flux and proteostasis capacity as an emergent network property are rare, partly because E3 ligase kinetic parameters are poorly measured.\n",
      "translation_table": [
        {
          "field_a_term": "E3 ubiquitin ligase substrate specificity (cell biology)",
          "field_b_term": "molecular recognition node in the ubiquitination network topology (systems biology)",
          "note": "Each E3 ligase defines a node in the protein degradation network; network topology determines substrate competition dynamics"
        },
        {
          "field_a_term": "polyubiquitin chain linkage type K48 vs K63 (cell biology)",
          "field_b_term": "signal encoding in degradation network — degradation signal vs. DNA damage pathway signal (systems biology)",
          "note": "K48-linked chains route proteins to proteasome; K63-linked chains signal DNA damage response or autophagy"
        },
        {
          "field_a_term": "proteasome substrate competition (cell biology)",
          "field_b_term": "resource competition in a degradation network with limited processing capacity (systems biology)",
          "note": "Multiple ubiquitinated substrates compete for finite 26S proteasome channel capacity; high competition causes selective degradation failure"
        },
        {
          "field_a_term": "proteostasis collapse in neurodegeneration (cell biology)",
          "field_b_term": "bistable switch in ubiquitin-proteasome system flux balance (systems biology)",
          "note": "α-synuclein / tau aggregates sequester E3 ligases and clog proteasome, creating positive feedback loop toward proteostasis failure"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature09783",
          "note": "Balch et al. (2008) - adapting proteostasis for disease intervention"
        },
        {
          "doi": "10.1016/j.cell.2012.09.040",
          "note": "Hartl et al. (2011) - molecular chaperones in protein folding and proteostasis"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cell-biology-biophysics/b-protein-ubiquitination-proteostasis-network.yaml"
    },
    {
      "id": "b-riboswitch-rna-aptamer-allosteric",
      "title": "Riboswitches function as RNA-based allosteric switches: the aptamer domain folds around a small-molecule ligand to trigger a global conformational change in the expression platform that controls transcription termination or translation initiation, with switching thermodynamics described by a two-state partition function\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A riboswitch is a cis-acting mRNA element that couples small-molecule sensing (aptamer domain with K_d 1 nM - 1 μM) to genetic control (expression platform alternating between ON/OFF secondary structures) through allosteric conformational switching; the switching probability is P_ON = 1/(1 + [L]/K_d",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-riboswitch-kinetic-proofreading-cotranscriptional"
      ],
      "communication_gap": "Molecular biologists study riboswitch genetic regulation through in vivo reporter assays while biophysicists measure RNA folding thermodynamics by single-molecule FRET and calorimetry; quantitative models connecting in vitro folding thermodynamics to in vivo switching efficiency at physiological metabolite concentrations are rarely validated experimentally.\n",
      "translation_table": [
        {
          "field_a_term": "riboswitch aptamer domain (molecular biology)",
          "field_b_term": "RNA receptor with ligand-binding pocket and nanomolar affinity constant K_d (biophysics)",
          "note": "Aptamer domain binds cognate ligand with high selectivity; K_d determines the ligand concentration range for switching"
        },
        {
          "field_a_term": "expression platform secondary structure switching (molecular biology)",
          "field_b_term": "conformational transition between two RNA secondary structure minima on folding energy landscape (biophysics)",
          "note": "Expression platform adopts terminator hairpin (OFF) or anti-terminator (ON) depending on whether aptamer is bound"
        },
        {
          "field_a_term": "riboswitch switching threshold [L]_50 (molecular biology)",
          "field_b_term": "ligand concentration at which P_ON = 0.5, determined by K_d and ΔΔG_fold (biophysics)",
          "note": "[L]_50 = K_d * exp(ΔΔG_fold/RT); cooperative riboswitches have Hill coefficient n > 1 producing sharper switching"
        },
        {
          "field_a_term": "cotranscriptional folding kinetics (molecular biology)",
          "field_b_term": "RNA folding pathway competition during transcription elongation (biophysics)",
          "note": "Riboswitch function depends on folding rate versus transcription rate; thermodynamic equilibrium assumption breaks down when folding is slow"
        }
      ],
      "references": [
        {
          "doi": "10.1038/415610a",
          "note": "Nahvi et al. (2002) - genetic control by a metabolite binding mRNA"
        },
        {
          "doi": "10.1126/science.1099776",
          "note": "Winkler & Breaker (2005) - riboswitches and the role of noncoding RNAs in bacterial gene control"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cell-biology-biophysics/b-riboswitch-rna-aptamer-allosteric.yaml"
    },
    {
      "id": "b-stress-granules-liquid-liquid-phase-separation",
      "title": "Stress granules — membraneless organelles that condense in the cytoplasm under cellular stress — form through liquid-liquid phase separation (LLPS) driven by multivalent weak interactions among intrinsically disordered protein regions and RNA, following the same Flory-Huggins free energy framework used to describe polymer demixing in soft matter physics",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Stress granule assembly obeys the Flory-Huggins lattice theory of polymer solutions: the condensed phase forms when the effective chi parameter (encoding RNA-protein and IDR-IDR interaction strengths) exceeds a critical threshold chi_c, producing a binodal/spinodal phase diagram; the condensate surf",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-stress-granule-binodal-concentration-prediction"
      ],
      "communication_gap": "Cell biologists characterize stress granule composition and assembly biochemically while soft matter physicists study polymer LLPS thermodynamics; the Flory-Huggins framework for biomolecular condensates was articulated by Hyman, Alberti and Pappu in the 2010s but quantitative chi parameter measurements in living cells remain technically challenging.",
      "translation_table": [
        {
          "field_a_term": "intrinsically disordered protein region (IDR) valency (cell biology)",
          "field_b_term": "polymer chain length and interaction parameter chi in Flory-Huggins theory (soft matter)",
          "note": "More IDR interaction motifs increase effective valency and shift the binodal to lower concentrations"
        },
        {
          "field_a_term": "stress granule condensate vs. dilute phase (cell biology)",
          "field_b_term": "polymer-rich vs. solvent-rich coexisting phases in LLPS (soft matter)",
          "note": "Lever rule applies: condensate volume fraction is set by distance from binodal in both systems"
        },
        {
          "field_a_term": "stress granule fusion and relaxation (cell biology)",
          "field_b_term": "coalescence of liquid droplets governed by capillary number (soft matter)",
          "note": "Granule fusion timescale ~ R*eta/gamma where R is radius, eta viscosity, gamma surface tension — pure soft matter"
        },
        {
          "field_a_term": "pathological hardening of stress granules in ALS/FTD (cell biology)",
          "field_b_term": "liquid-to-solid gelation or crystallization transition in polymer condensate (soft matter)",
          "note": "Aberrant amyloid-like fibrillization in IDRs corresponds to phase transition from liquid to gel/solid phase"
        }
      ],
      "references": [
        {
          "doi": "10.1146/annurev-biochem-060209-104336",
          "note": "Hyman et al. (2014) - liquid-liquid phase separation in biology (foundational review)"
        },
        {
          "doi": "10.1016/j.cell.2017.08.048",
          "note": "Boija et al. (2018) - transcriptional coactivators form condensates at active enhancers (LLPS in cell biology)"
        },
        {
          "doi": "10.1038/nchembio.2016.39",
          "note": "Molliex et al. (2015) - phase separation by low complexity domains promotes stress granule assembly"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cell-biology-soft-matter/b-stress-granules-liquid-liquid-phase-separation.yaml"
    },
    {
      "id": "b-debye-length-x-membrane-electrical-double-layer",
      "title": "Debye screening length in electrolytes ↔ Gouy–Chapman/Stern electrical double layer at biomembranes and soft interfaces (physical chemistry ↔ cell biophysics)\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Poisson–Boltzmann theory predicts exponential screening of electrostatic potentials with Debye length lambda_D proportional to sqrt(epsilon k T / I) for ionic strength I. Biological membranes adsorb ions and carry fixed charges; the interfacial “double layer” combines a diffuse Gouy–Chapman region w",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ion-specific-double-layer-competition-modulates-permeation"
      ],
      "communication_gap": "Biologists often quote “salt screens charges” qualitatively; physical chemists carry Stern corrections and activity coefficients that are rarely imported into molecular-cell discussions without collaboration.\n",
      "translation_table": [
        {
          "field_a_term": "Debye length lambda_D",
          "field_b_term": "double-layer thickness scale adjacent to membrane",
          "note": "Sets where potential gradients are appreciable."
        },
        {
          "field_a_term": "ionic strength I",
          "field_b_term": "extracellular/intracellular ionic environment modulating protein–membrane interactions",
          "note": "Physiology tunes I across compartments."
        },
        {
          "field_a_term": "Stern layer capacitance",
          "field_b_term": "headgroup hydration and discrete charge sites",
          "note": "Corrections beyond mean-field PB."
        }
      ],
      "references": [
        {
          "doi": "10.1146/annurev.physchem.51.1.545",
          "note": "Roux (2000) — modeling electrostatics in biomolecular systems (Annu. Rev. Phys. Chem.)."
        },
        {
          "doi": "10.1016/S0006-3495(02)75304-0",
          "note": "McLaughlin et al. (2002) — electrostatic properties of membranes (Biophys. J.)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chemistry-biology/b-debye-length-x-membrane-electrical-double-layer.yaml"
    },
    {
      "id": "b-electrochemical-impedance-x-cell-membrane",
      "title": "Electrochemical impedance spectroscopy (EIS) represents interfacial dynamics as complex impedance spectra — closely analogous to small-signal electrical models of cell membranes and ion-channel gating in the Hodgkin–Huxley tradition.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "EIS fits equivalent circuits with resistive and capacitive elements to electrode–electrolyte interfaces, capturing charge transfer and double-layer capacitance. Cell membranes likewise present capacitive behavior with conductive ion channels whose conductances vary with voltage and time. Linearized ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-eis-spectra-constrain-gating-substates"
      ],
      "communication_gap": "Electrochemistry textbooks emphasize electrodes; electrophysiology emphasizes channels. Shared impedance vocabulary exists but joint methodological papers are sparse outside specialized labs.",
      "translation_table": [
        {
          "field_a_term": "double-layer capacitance C_dl",
          "field_b_term": "membrane capacitance C_m"
        },
        {
          "field_a_term": "charge-transfer resistance R_ct",
          "field_b_term": "inverse of effective membrane conductance"
        },
        {
          "field_a_term": "Warburg diffusion impedance",
          "field_b_term": "electrodiffusion limitations in unstirred layers / long pores"
        }
      ],
      "references": [
        {
          "doi": "10.1113/jphysiol.1952.sp004764",
          "note": "Hodgkin & Huxley (1952) — ionic basis of excitation (membrane conductance dynamics)"
        },
        {
          "doi": "10.1002/9783527610426.ch1",
          "note": "Bard & Faulkner — foundational EIS treatment (book chapter DOI via Wiley reference work)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chemistry-biology/b-electrochemical-impedance-x-cell-membrane.yaml"
    },
    {
      "id": "b-enzyme-engineering-directed-evolution",
      "title": "Directed evolution bridges chemistry and biology by applying Darwinian selection to proteins in the laboratory: iterative cycles of random mutagenesis, screening, and selection have produced enzymes with enhanced stability, altered specificity, and novel catalytic activities — including reactions no natural enzyme performs — with machine learning now compressing the experimental search space 100-fold.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Directed evolution (Frances Arnold, Nobel Prize 2018) applies the logic of Darwinian evolution to proteins in vitro: create genetic diversity (mutagenesis), express the protein library, screen/select for the desired function, take the best variants, and repeat. This iterative process navigates the p",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ml-directed-evolution-navigates-epistatic-fitness-landscape"
      ],
      "communication_gap": "Directed evolution developed at the interface of chemistry and biology but was initially resisted by both communities. Chemists valued rational mechanism-based design; the idea of \"directed\" Darwinian evolution seemed to abandon chemical understanding. Molecular biologists were skeptical that a few hundred mutations could produce new function. Arnold's early papers were rejected before acceptance. Computational protein design (Baker lab) and directed evolution (Arnold lab) operated largely independently until the ML-assisted fusion in the late 2010s. Industrial enzyme engineering (Novozymes, Codexis) is published in applied chemistry journals while academic directed evolution appears in Science, Nature, and PNAS — creating a secondary gap between industrial practice and academic theory.\n",
      "translation_table": [
        {
          "field_a_term": "random mutagenesis library (10⁷ protein variants)",
          "field_b_term": "chemical library (HTS compound collection, 10⁶ molecules)",
          "note": "both sample diversity for function; protein evolution is more efficient because mutations are correlated via sequence-function landscape"
        },
        {
          "field_a_term": "FACS screening (protein activity → cell survival)",
          "field_b_term": "high-throughput screening (HTS) in drug discovery (biochemical assay)",
          "note": "directed evolution exploits in vivo coupling of genotype to phenotype — eliminates resynthesis step"
        },
        {
          "field_a_term": "DNA shuffling (in vitro recombination of beneficial mutations)",
          "field_b_term": "sexual recombination (biological mechanism for combining beneficial mutations)",
          "note": "Stemmer explicitly modeled DNA shuffling on sexual recombination; both escape Muller's ratchet"
        },
        {
          "field_a_term": "fitness landscape (protein sequence → activity mapping)",
          "field_b_term": "energy landscape (molecular conformation → free energy mapping)",
          "note": "protein fitness landscapes can be rugged (multiple local optima) or smooth (single funnel) — determines evolvability"
        },
        {
          "field_a_term": "theozyme (transition state analog defining optimal active site geometry)",
          "field_b_term": "pharmacophore (3D arrangement of functional groups for drug binding)",
          "note": "both define the geometric and electronic constraints that the molecular scaffold must satisfy"
        }
      ],
      "references": [
        {
          "note": "Arnold (2018) Nobel Lecture — Directed evolution — bringing new chemistry to life; Nobel Foundation"
        },
        {
          "doi": "10.1002/chem.19990050826",
          "note": "Reetz & Jaeger (1999) Directed evolution of enantioselective enzymes; Chem Eur J 5:1308"
        },
        {
          "doi": "10.1126/science.1188934",
          "note": "Savile et al. (2010) Biocatalytic asymmetric synthesis of chiral amines; Science 329:305"
        },
        {
          "doi": "10.1126/science.aaf1698",
          "note": "Huang et al. (2016) De novo design of a four-fold symmetric TIM-barrel protein; Science 352:1285"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/chemistry-biology/b-enzyme-engineering-directed-evolution.yaml"
    },
    {
      "id": "b-enzyme-kinetics-metabolic-network",
      "title": "Metabolic Control Analysis formalises the distributed nature of metabolic flux control in enzyme networks via the summation theorem (ΣCⁱⱼ = 1) and connectivity theorem, proving that no single enzyme is fully rate-limiting in a metabolic network — a result that emerged from bridging Michaelis-Menten kinetics with network-level systems theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Michaelis & Menten (1913) derived the fundamental rate equation for an enzyme-catalysed reaction: v = Vmax[S]/(Km + [S]). This is derived by assuming quasi-steady state of the enzyme-substrate complex (ES): d[ES]/dt ≈ 0, giving Km = (k₋₁ + k₂)/k₁. The hyperbolic saturation curve means: at [S] << Km,",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-mca-summation-theorem-distributed-cancer-target"
      ],
      "communication_gap": "Michaelis-Menten kinetics was established in physical chemistry (Biochemische Zeitschrift), while MCA was developed in quantitative biology (Symposia of the Society for Experimental Biology, 1973 — not a mainstream journal). For 20 years after Kacser & Burns, MCA was unknown in the broader biochemistry community, which continued teaching the \"rate-limiting step\" concept. Even today, introductory biochemistry textbooks use the rate-limiting step concept without MCA corrections. Metabolic engineers who discovered the same control distribution empirically often rediscovered MCA principles without knowing the original formulation. The gap between single-enzyme kinetics (chemistry) and network control (systems biology) is still incompletely bridged in curricula.\n",
      "translation_table": [
        {
          "field_a_term": "Michaelis constant Km (affinity of enzyme for substrate)",
          "field_b_term": "network input sensitivity parameter"
        },
        {
          "field_a_term": "Vmax = kcat × [E_total] (maximum catalytic rate)",
          "field_b_term": "rate capacity of a network node"
        },
        {
          "field_a_term": "flux control coefficient Cⁱⱼ (enzyme j's share of flux control)",
          "field_b_term": "sensitivity coefficient in sensitivity analysis of systems models"
        },
        {
          "field_a_term": "summation theorem ΣCⁱⱼ = 1",
          "field_b_term": "budget constraint in network optimisation (resources sum to total)"
        },
        {
          "field_a_term": "elasticity εⱼˢ (kinetic sensitivity to metabolite)",
          "field_b_term": "local gain of a feedback element in control engineering"
        },
        {
          "field_a_term": "connectivity theorem (MCA)",
          "field_b_term": "gain-bandwidth product relationship in control systems"
        },
        {
          "field_a_term": "metabolic steady state (Sv = 0)",
          "field_b_term": "equilibrium of a linear dynamical system (dx/dt = Ax = 0)"
        }
      ],
      "references": [
        {
          "note": "Michaelis & Menten (1913) Biochem Z 49:333 — Michaelis-Menten kinetics"
        },
        {
          "doi": "10.1007/BF01093582",
          "note": "Kacser & Burns (1973) Symp Soc Exp Biol 27:65 — metabolic control analysis"
        },
        {
          "doi": "10.1111/j.1432-1033.1974.tb03318.x",
          "note": "Heinrich & Rapoport (1974) Eur J Biochem 42:89 — independent MCA derivation"
        },
        {
          "note": "Fell (1997) Understanding the Control of Metabolism. Portland Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/chemistry-biology/b-enzyme-kinetics-metabolic-network.yaml"
    },
    {
      "id": "b-enzyme-kinetics-x-michaelis-menten",
      "title": "Enzyme kinetics x Michaelis-Menten — substrate saturation as queueing theory\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The Michaelis-Menten enzyme saturation curve is mathematically identical to an M/M/1 queueing model where the enzyme is the server, substrate molecules are customers, and kcat is the service rate; enzyme inhibition kinetics map to different queueing disciplines (priority queues for competitive inhib",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Biochemists derive the Michaelis-Menten equation from chemical kinetics (rapid equilibrium or steady-state assumptions) without recognizing the isomorphism to queueing theory; operations researchers are unaware that their algorithms have direct biochemical analogs, blocking potential cross-fertilization in metabolic engineering and network pharmacology.\n",
      "translation_table": [
        {
          "field_a_term": "enzyme active site (biochemistry)",
          "field_b_term": "server in M/M/1 queue (queueing theory)",
          "note": "The enzyme processes one substrate at a time; Km is the dissociation constant analogous to the arrival-to-service rate ratio"
        },
        {
          "field_a_term": "Km (Michaelis constant, biochemistry)",
          "field_b_term": "traffic intensity ρ = λ/μ (queueing theory)",
          "note": "Km sets the substrate concentration for half-maximal velocity, equivalent to the load factor at which the queue becomes saturated"
        },
        {
          "field_a_term": "competitive inhibition (biochemistry)",
          "field_b_term": "priority queueing discipline (queueing theory)",
          "note": "Competitive inhibitor occupies the active site with higher priority than substrate, blocking service — exactly a priority queue"
        },
        {
          "field_a_term": "kcat/Km (catalytic efficiency, biochemistry)",
          "field_b_term": "throughput per unit load (operations research)",
          "note": "Diffusion-limited enzymes achieve kcat/Km near 10^8-10^9 M^-1 s^-1, the theoretical queueing throughput maximum"
        }
      ],
      "references": [
        {
          "doi": "10.1111/j.1432-1033.1994.tb18811.x",
          "note": "Heinrich & Schuster (1994) - The regulation of cellular systems; European J Biochemistry"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chemistry-biology/b-enzyme-kinetics-x-michaelis-menten.yaml"
    },
    {
      "id": "b-lipid-metabolism-cellular-signaling",
      "title": "Lipid Metabolism and Cellular Signaling — eicosanoids, sphingolipids, and the PI3K-PIP3-Akt axis link lipid chemistry to inflammation, survival, and cancer",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Lipids serve three distinct biological roles: structural (phospholipid bilayers), energy storage (triglycerides in adipocytes), and signalling. Eicosanoid signalling begins with phospholipase A2 releasing arachidonic acid (C20:4) from membrane phospholipids. Cyclooxygenase-1 (COX-1, constitutive) an",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Lipid biochemistry has historically been fragmented: prostaglandin pharmacology, sphingolipid biology, and phosphoinositide signalling developed as largely separate sub-fields with distinct journals and conference communities. The PI3K field was long dominated by biochemists while eicosanoid research was driven by pharmacologists. Systems lipidomics (mass spectrometry of hundreds of lipid species simultaneously) is beginning to unify these sub-fields by providing a comprehensive view of the lipidome.\n",
      "translation_table": [
        {
          "field_a_term": "COX-1/2 catalytic cyclooxygenation of arachidonic acid",
          "field_b_term": "prostaglandin and thromboxane synthesis in inflammation",
          "note": "Aspirin Ser530 acetylation permanently blocks the arachidonic acid substrate channel"
        },
        {
          "field_a_term": "ceramide (N-acylsphingosine)",
          "field_b_term": "pro-apoptotic lipid second messenger",
          "note": "Ceramide activates PP2A, inhibits Akt, and increases mitochondrial membrane permeability"
        },
        {
          "field_a_term": "PIP3 (phosphatidylinositol 3,4,5-trisphosphate)",
          "field_b_term": "lipid second messenger that recruits PH-domain proteins to the plasma membrane",
          "note": "PIP3 is the 'molecular landing pad' for Akt and PDK1; its local concentration is PI3K minus PTEN"
        },
        {
          "field_a_term": "PTEN lipid phosphatase activity",
          "field_b_term": "tumour suppression via PIP3 degradation",
          "note": "PTEN is the second most mutated tumour suppressor after p53; its loss is sufficient for PI3K pathway hyperactivation"
        },
        {
          "field_a_term": "sphingosine-1-phosphate (S1P) receptor signalling",
          "field_b_term": "G-protein-coupled receptor control of lymphocyte trafficking and vascular tone",
          "note": "S1P receptors (S1PR1–5) are GPCRs; fingolimod (MS drug) is a functional S1P antagonist trapping lymphocytes in lymph nodes"
        },
        {
          "field_a_term": "lipid droplet biogenesis (ER lens model)",
          "field_b_term": "intracellular lipid storage and stress response organelle",
          "note": "LD coat proteins (perilipins) regulate lipase access; LDs interact with mitochondria, ER, and autophagosomes"
        }
      ],
      "references": [
        {
          "doi": "10.1038/newbio231232a0",
          "note": "Vane (1971) Nat New Biol 231:232 — prostaglandin synthesis and aspirin mechanism"
        },
        {
          "doi": "10.1038/nrm2329",
          "note": "Hannun & Obeid (2008) Nat Rev Mol Cell Biol 9:139 — sphingolipid signalling"
        },
        {
          "doi": "10.1126/science.1071420",
          "note": "Cantley (2002) Science 296:1655 — PI3K pathway and cancer"
        },
        {
          "doi": "10.1146/annurev-biochem-061009-101430",
          "note": "Walther & Farese (2012) Annu Rev Biochem 81:687 — lipid droplet biology"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/chemistry-biology/b-lipid-metabolism-cellular-signaling.yaml"
    },
    {
      "id": "b-marcus-tunneling-x-enzyme-reaction-coordinate",
      "title": "Marcus electron-transfer theory — reorganizational free energy λ and electronic coupling V_DA along a reaction coordinate — supplies the canonical framework for interpreting nuclear tunneling corrections and inverted-region kinetics in enzyme-catalyzed redox reactions when tunneling is analyzed along the same collective solvent/protein modes used in PCET models.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Marcus theory expresses nonadiabatic electron-transfer rates as k_ET ∝ |V_DA|² √(λ/(4πk_B T)) exp(-(ΔG°+λ)²/(4λ k_B T)), where λ is the reorganizational free energy along the collective solvent/reaction coordinate. Enzyme tunneling discussions often emphasize distance along donor–acceptor coordinate",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-marcus-tunneling-x-enzyme-reaction-coordinate"
      ],
      "communication_gap": "Marcus theory is central in physical chemistry curricula but often omitted in introductory enzyme catalysis courses focused on Michaelis–Menten kinetics. Conversely, enzyme tunneling literature foregrounds KIE experiments and WKB-style barriers without always tracing λ extraction protocols back to Marcus analysis of driving-force sweeps.\n",
      "translation_table": [
        {
          "field_a_term": "Reorganization energy λ (Marcus theory)",
          "field_b_term": "Collective coordinate breadth for donor–acceptor coupling and tunneling distance sampling (enzyme PCET models)",
          "note": "Large λ implies heavier solvent/protein reorganization before crossing; tunneling corrections modify prefactors while λ sets the activation Marcus parabola."
        },
        {
          "field_a_term": "Electronic coupling V_DA between diabatic states",
          "field_b_term": "Effective tunneling strength across the redox-active orbital pathway (through-bond vs through-space)",
          "note": "Strong coupling suppresses nonadiabatic limits; enzymes tune distance/covalency to keep reaction in electronically nonadiabatic regimes where tunneling signatures appear clearly."
        },
        {
          "field_a_term": "Inverted region (−ΔG° > λ)",
          "field_b_term": "Rate decreases despite thermodynamic favorability (electron transfer kinetics)",
          "note": "Used experimentally to bracket λ and validate Marcus frameworks in proteins; tunneling-adjusted theories preserve inverted-region tests when nuclear overlaps shrink with driving force."
        },
        {
          "field_a_term": "Landau–Zener crossing probability along reaction coordinate",
          "field_b_term": "Protein dynamics-gated hydrogen/electron transfer events in enzymes",
          "note": "Same crossing geometry determines whether vibronic coupling models or simpler Marcus rates suffice."
        }
      ],
      "references": [
        {
          "doi": "10.1103/RevModPhys.65.599",
          "note": "Marcus (1993) Rev. Mod. Phys. — electron transfer theory including λ and inverted region"
        },
        {
          "doi": "10.1021/ar050201z",
          "note": "Hammes-Schiffer (2006) Acc. Chem. Res. — PCET and hydrogen tunneling in enzymes"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chemistry-biology/b-marcus-tunneling-x-enzyme-reaction-coordinate.yaml"
    },
    {
      "id": "b-metabolic-flux-x-linear-programming",
      "title": "Metabolic Flux Analysis x Linear Programming - stoichiometric constraints as convex polytope\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Flux balance analysis (FBA) models cellular metabolism as a linear program: maximize biomass production subject to stoichiometric equality constraints and thermodynamic inequality constraints; the feasible flux space is a convex polytope and the optimal metabolic state is a vertex - making systems b",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Linear programming theory (simplex method, duality) has been mature since 1947, but its systematic application to genome-scale metabolic models only began in the 1990s (Varma & Palsson); most biology textbooks do not present metabolism as an optimization problem, impeding import of 70 years of LP theory.\n",
      "translation_table": [
        {
          "field_a_term": "Stoichiometric matrix S (m reactions x n metabolites)",
          "field_b_term": "Constraint matrix A in LP (Ax = b, x >= 0)",
          "note": "Each row of S is a metabolite mass balance; steady-state assumption (dC/dt = 0) gives Sv = 0, which is the LP equality constraint; upper and lower bounds on fluxes (thermodynamic reversibility, enzyme capacity) give the inequality constraints.\n"
        },
        {
          "field_a_term": "Flux distribution vector v (mmol/gDW/h)",
          "field_b_term": "Decision variable vector x in LP",
          "note": "The vector of all reaction fluxes is the LP decision variable; each feasible flux distribution is a point in the flux polytope; optimizing biomass yield finds the flux vertex maximizing the objective function.\n"
        },
        {
          "field_a_term": "Optimal growth flux (biomass reaction rate)",
          "field_b_term": "LP objective function value",
          "note": "The biomass reaction is a pseudo-reaction with stoichiometry equal to the dry- weight composition of the cell; maximizing its flux is the LP objective, equivalent to maximizing growth rate subject to nutrient constraints.\n"
        },
        {
          "field_a_term": "Extreme ray (thermodynamic unfeasible direction)",
          "field_b_term": "Infeasibility certificate (LP dual variable)",
          "note": "Thermodynamic infeasibility (net ATP consumption without energy source) corresponds to an unbounded LP ray; adding thermodynamic constraints (loop-law) restricts the polytope to eliminate these rays.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nbt.1614",
          "note": "Orth, Thiele & Palsson (2010) - what is flux balance analysis? Nature Biotechnology 28:245"
        },
        {
          "doi": "10.1038/msb.2013.18",
          "note": "Bordbar et al. (2014) - constraint-based models predict metabolic and associated cellular functions"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chemistry-biology/b-metabolic-flux-x-linear-programming.yaml"
    },
    {
      "id": "b-michaelis-menten-enzyme-kinetics",
      "title": "Michaelis-Menten enzyme kinetics ↔ hyperbolic saturation — a universal functional form across biology, chemistry, and ecology",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Michaelis-Menten equation v = V_max[S]/(K_M + [S]) describes enzyme-catalysed reaction rates via a quasi-steady-state approximation (Briggs & Haldane 1925) applied to the E + S ⇌ ES → E + P mechanism. The Michaelis constant K_M ≈ (k_off + k_cat)/k_on is the substrate concentration at half-maxima",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-universal-saturation-kinetics-evolutionary-origin"
      ],
      "communication_gap": "The Michaelis-Menten equation was published in a German biochemistry journal in 1913 and the Langmuir isotherm in an American chemistry journal in 1918 — the communities were completely separate. Monod rediscovered the same form in microbiology in 1949. Holling derived it independently for ecology in 1959. Each field considers the equation 'theirs', teaches it with different notation, and rarely acknowledges the cross-domain equivalence. This perpetuates independent parameter estimation methodology and hampers cross-domain model transfer.\n",
      "translation_table": [
        {
          "field_a_term": "maximum velocity V_max (enzyme kinetics)",
          "field_b_term": "maximum adsorption θ_max (Langmuir) / μ_max (Monod) / a (Holling)",
          "note": "The saturation ceiling — set by the number of active sites or handling time"
        },
        {
          "field_a_term": "Michaelis constant K_M",
          "field_b_term": "Langmuir K_d (dissociation const) / Monod K_s / Holling h (half-saturation)",
          "note": "The half-saturation constant — substrate/ligand concentration at 50% of maximum"
        },
        {
          "field_a_term": "enzyme-substrate complex ES",
          "field_b_term": "bound receptor-ligand complex / adsorbed molecule / predator handling prey",
          "note": "The occupied state that saturates the system"
        },
        {
          "field_a_term": "k_cat (catalytic rate constant)",
          "field_b_term": "reaction rate after binding / desorption rate / prey handling rate",
          "note": "The rate-limiting step following binding"
        },
        {
          "field_a_term": "substrate concentration [S]",
          "field_b_term": "ligand/adsorbate concentration / nutrient concentration / prey density",
          "note": "The independent variable driving the saturable process"
        },
        {
          "field_a_term": "competitive inhibition (shifts K_M)",
          "field_b_term": "ligand competition / nutrient competition between species",
          "note": "Same competitive displacement mathematics in all domains"
        }
      ],
      "references": [
        {
          "note": "Michaelis & Menten (1913) Biochem Z 49:333 — original enzyme kinetics derivation",
          "url": "https://doi.org/10.1016/S0307-4412(13)00153-8"
        },
        {
          "note": "Briggs & Haldane (1925) Biochem J 19:338 — quasi-steady-state approximation",
          "doi": "10.1042/bj0190338"
        },
        {
          "doi": "10.1021/ja02242a004",
          "note": "Langmuir (1918) J Am Chem Soc 40:1361 — adsorption isotherm"
        },
        {
          "note": "Monod (1949) Annu Rev Microbiol 3:371 — microbial growth kinetics",
          "doi": "10.1146/annurev.mi.03.100149.002103"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/chemistry-biology/b-michaelis-menten-enzyme-kinetics.yaml"
    },
    {
      "id": "b-photosynthesis-quantum-energy-transfer",
      "title": "Photosynthetic light harvesting couples near-unity quantum efficiency of primary charge separation (P680 in PSII) to Förster resonance energy transfer through antenna complexes, with disputed quantum coherence (Fleming 2007 FMO beats at 77K) operating within the Z-scheme architecture that achieves sufficient redox span to split water and reduce NADP⁺.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Photosystem II (PSII) is the only biological machine that oxidizes water: the Mn₄CaO₅ cluster (oxygen-evolving complex, OEC) accumulates four oxidizing equivalents via the Kok S-state cycle (S0→S1→S2→S3→S4→S0), releasing O₂ and 4H⁺ per water pair oxidized. Charge separation at the reaction center P6",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-vibronic-coupling-fmo-coherence-functional-enhancement"
      ],
      "communication_gap": "Quantum biology (FMO coherence) was primarily developed by physical chemists using 2D electronic spectroscopy (Fleming group at Berkeley), while plant biochemists studying PSII mechanism rarely use ultrafast spectroscopy. The controversy over coherence vs. classical transfer required simultaneous expertise in quantum optics, protein biochemistry, and molecular dynamics simulation — available in very few groups. The Z-scheme is textbook biology but the thermodynamic analysis (Carnot efficiency, tandem cell analogy) is taught only in physical chemistry courses.\n",
      "translation_table": [
        {
          "field_a_term": "excitation energy transfer (EET) via Förster resonance",
          "field_b_term": "exciton hopping between chromophores in antenna complex"
        },
        {
          "field_a_term": "quantum coherence (off-diagonal density matrix elements)",
          "field_b_term": "wave-like energy delocalization across multiple chlorophyll molecules"
        },
        {
          "field_a_term": "charge separation ΔG (free energy of electron transfer)",
          "field_b_term": "photochemical work stored in redox potential difference"
        },
        {
          "field_a_term": "Carnot efficiency limit (T_hot=5800K sun, T_cold=300K Earth)",
          "field_b_term": "~10% thermodynamic efficiency of oxygenic photosynthesis"
        },
        {
          "field_a_term": "Z-scheme series connection of two photoelectrochemical cells",
          "field_b_term": "two-photosystem tandem solar cell architecture"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature05678",
          "note": "Fleming et al. (2007) Nature 446:782 — quantum coherence in photosynthetic light harvesting (FMO)"
        },
        {
          "doi": "10.1038/nature10019",
          "note": "Umena et al. (2011) Nature 473:55 — crystal structure of oxygen-evolving PSII at 1.9 Å"
        },
        {
          "note": "Blankenship (2014) Molecular Mechanisms of Photosynthesis — 2nd ed., Wiley-Blackwell"
        },
        {
          "doi": "10.1146/annurev.physchem.040808.090424",
          "note": "Cheng & Fleming (2009) Annu Rev Phys Chem 60:241 — dynamics of light harvesting in photosynthesis"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/chemistry-biology/b-photosynthesis-quantum-energy-transfer.yaml"
    },
    {
      "id": "b-prion-fold-x-protein-phase-separation",
      "title": "Prion folding x Protein phase separation — conformational templating as nucleation\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Prion conformational templating (a misfolded protein recruiting correctly folded copies) and liquid-liquid phase separation nucleation (a condensate seed recruiting soluble protein) are governed by the same nucleation-growth kinetics; both are described by classical nucleation theory with conformati",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Prion biology developed in the neurodegenerative disease context; phase separation emerged in cell biology. The two fields rarely cite each other despite sharing nucleation kinetics formalism.\n",
      "translation_table": [
        {
          "field_a_term": "Prion misfolded seed (nucleus)",
          "field_b_term": "Phase-separation condensate droplet nucleus",
          "note": "Both act as thermodynamic nucleation seeds that lower the activation barrier for further recruitment — the critical nucleus size is analogous in both systems.\n"
        },
        {
          "field_a_term": "Conformational free energy barrier DeltaG_conf",
          "field_b_term": "Interfacial free energy gamma of condensate",
          "note": "Both set the energetic barrier for the phase transition; in prions the barrier is conformational, in LLPS it is surface tension at the droplet interface.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1016/j.cell.2017.08.048",
          "note": "Alberti & Hyman (2021) — biomolecular condensates at the nexus of cellular stress, protein aggregation disease, and aging.\n"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chemistry-biology/b-prion-fold-x-protein-phase-separation.yaml"
    },
    {
      "id": "b-protein-post-translational-modifications",
      "title": "Protein post-translational modifications bridge chemistry and biology: the PTM code — phosphorylation, ubiquitination, acetylation, glycosylation, and SUMOylation — acts as a combinatorial language that expands the proteome 100-fold and enables the epigenetic histone code.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Post-translational modifications (PTMs) are covalent chemical additions to amino acid side chains that expand proteome diversity and regulatory complexity far beyond what the genome encodes. The major PTMs: (1) Phosphorylation: kinases transfer γ-phosphate from ATP to Ser/Thr/Tyr hydroxyl groups — ~",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-histone-code-combinatorial-specificity-exceeds-single-mark-models"
      ],
      "communication_gap": "Biochemists who characterize PTM enzyme mechanisms (kcat, Km, inhibitor Ki) and cell biologists who study PTM-dependent signaling pathways often do not integrate with structural biologists (PTM-reader domain structures by X-ray/cryo-EM) or proteomics scientists (global PTM profiling). The histone code hypothesis is treated as biology; the underlying organic chemistry of the modification reactions is rarely emphasized in cell biology curricula.\n",
      "translation_table": [
        {
          "field_a_term": "kinase (ATP → phosphate transfer)",
          "field_b_term": "enzyme-catalyzed nucleophilic substitution (γ-phosphate → Ser/Thr/Tyr)",
          "note": "phosphorylation changes local charge, creates binding site for SH2/FHA domains"
        },
        {
          "field_a_term": "ubiquitin E3 ligase (substrate specificity)",
          "field_b_term": "isopeptide bond chemistry (Lys ε-NH₂ + ubiquitin C-terminus)",
          "note": "E3 determines substrate; chemistry is E2-catalyzed thioester → lysine aminolysis"
        },
        {
          "field_a_term": "K48 vs K63 polyubiquitin chain linkage",
          "field_b_term": "chain topology determines protein conformation → different cellular fate",
          "note": "same chemistry (isopeptide bond) but different linkage position creates distinct signals"
        },
        {
          "field_a_term": "histone acetylation (H3K9ac, H3K27ac)",
          "field_b_term": "acetyl-CoA-dependent acetylation; charge neutralization on Lys ε-NH₃⁺",
          "note": "HAT reaction: acetyl-CoA + Lys-NH₂ → CoA + Lys-NHCOCH₃; charge effect opens chromatin"
        },
        {
          "field_a_term": "sirtuin deacetylase (NAD⁺-dependent)",
          "field_b_term": "NAD⁺ + acetyl-Lys → nicotinamide + O-acetyl-ADP-ribose + Lys",
          "note": "unique chemistry (not simple hydrolysis); links PTM to cellular NAD⁺ / metabolic state"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0092-8674(95)90405-0",
          "note": "Hunter (1995) Protein kinases and phosphatases — the yin and yang of protein phosphorylation; Cell 80:225"
        },
        {
          "doi": "10.1146/annurev.biochem.70.1.503",
          "note": "Pickart (2001) Mechanisms underlying ubiquitination; Annu Rev Biochem 70:503"
        },
        {
          "doi": "10.1038/35053702",
          "note": "Strahl & Allis (2000) The language of covalent histone modifications; Nature 403:41"
        },
        {
          "doi": "10.1126/science.1075762",
          "note": "Manning et al. (2002) The protein kinase complement of the human genome; Science 298:1912"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chemistry-biology/b-protein-post-translational-modifications.yaml"
    },
    {
      "id": "b-alphafold-structure-priors-x-enzyme-engineering-screen-pruning",
      "title": "AlphaFold structural priors connect protein-structure prediction with enzyme engineering screen prioritization.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Predicted structure-confidence patterns can serve as priors for pruning enzyme design search spaces before expensive wet-lab screening.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-alphafold-confidence-weighted-screening-improves-enzyme-hit-rates"
      ],
      "communication_gap": "Structure-prediction communities optimize fold accuracy, while enzyme programs optimize activity and manufacturability under tight screening budgets.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1038/s41586-021-03819-2",
          "note": "Highly accurate protein structure prediction with AlphaFold."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/chemistry-computer-science/b-alphafold-structure-priors-x-enzyme-engineering-screen-pruning.yaml"
    },
    {
      "id": "b-energy-landscape-funnels-x-protein-ligand-docking-search",
      "title": "Energy-landscape funnel theory bridges statistical physics and protein-ligand docking search design.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Docking search strategies can use funnel-ruggedness diagnostics from energy-landscape theory to avoid overcommitting to shallow local minima during pose exploration.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-funnel-aware-search-reduces-docking-decoy-traps"
      ],
      "communication_gap": "Communities use different terminology and validation conventions, masking transferable method equivalence.",
      "translation_table": [],
      "references": [
        {
          "url": "https://www.science.org/doi/10.1126/science.271.5248.487",
          "note": "Energy landscape perspective for protein folding."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/chemistry-computer-science/b-energy-landscape-funnels-x-protein-ligand-docking-search.yaml"
    },
    {
      "id": "b-reaction-networks-x-petri-nets",
      "title": "Reaction Networks x Petri Nets — chemical stoichiometry as token flow\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Chemical reaction networks (CRNs) are exactly Petri nets: species are places, reactions are transitions, stoichiometric coefficients are arc weights, and concentration dynamics are token flows; Petri net reachability theory decides CRN persistence (no species goes extinct) and provides a complete co",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Chemists developed stoichiometric network analysis (Clarke 1980s) independently of computer scientists developing Petri net theory (Petri 1962); the isomorphism between CRNs and Petri nets was recognized by chemists (Schuster, Heinrich) in the 1990s but not adopted by the CS Petri net community, creating two separate bodies of literature on identical mathematical objects.\n",
      "translation_table": [
        {
          "field_a_term": "Chemical species (S_i)",
          "field_b_term": "Place in Petri net",
          "note": "Each molecular species maps to a Petri net place; the number of molecules (or concentration) corresponds to the token count at that place.\n"
        },
        {
          "field_a_term": "Chemical reaction (stoichiometric equation)",
          "field_b_term": "Transition in Petri net",
          "note": "Each reaction consumes tokens from reactant places (stoichiometric coefficients = input arc weights) and deposits tokens at product places (output arc weights); the reaction fires when all reactant places have sufficient tokens.\n"
        },
        {
          "field_a_term": "Mass-action kinetics (rate = k·∏[S_i]^νi)",
          "field_b_term": "Stochastic Petri net firing rates",
          "note": "Stochastic Petri nets assign firing rates to transitions; mass-action kinetics give the exact firing rate as a function of token counts, connecting chemical kinetics to Petri net dynamics.\n"
        },
        {
          "field_a_term": "CRN persistence (no species extinction)",
          "field_b_term": "Petri net liveness (all transitions can fire)",
          "note": "A CRN is persistent if no species concentration reaches zero from positive initial conditions; this is equivalent to Petri net liveness (no dead transition), decidable by linear programming over the place invariants.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1007/978-3-642-14684-8_1",
          "note": "Angeli (2011) — Petri nets, chemical reaction networks; Lecture Notes in Mathematics"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chemistry-cs/b-reaction-networks-x-petri-nets.yaml"
    },
    {
      "id": "b-chemical-ecology-signaling-networks",
      "title": "Organismal chemical communication (pheromones, allelochemicals, quorum sensing) forms a molecular information network governed by the same channel-capacity mathematics as telecommunications",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Organisms communicate, defend, and cooperate via chemical signals forming a molecular information network. Pheromones (insects), allelopathic chemicals (plants inhibiting neighbours), and microbial quorum sensing autoinducers constitute a chemical internet with definable channel capacity C = B log₂(",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-chemical-signaling-channel-capacity-limits-ecosystem-complexity"
      ],
      "communication_gap": "Chemical ecology and information theory developed independently; ecologists rarely quantify communication in bits, and information theorists do not model chemical diffusion channels. Shannon's framework was applied to molecular communication only in the 2000s (Akyildiz et al.) and rarely extended back to natural chemical ecology.\n",
      "translation_table": [
        {
          "field_a_term": "pheromone/autoinducer molecule",
          "field_b_term": "carrier signal / symbol",
          "note": "Chemical identity encodes information; receptor binding is decoding"
        },
        {
          "field_a_term": "diffusion coefficient D",
          "field_b_term": "channel bandwidth B",
          "note": "Diffusion rate limits the speed of chemical information transmission"
        },
        {
          "field_a_term": "receptor binding affinity (K_d)",
          "field_b_term": "detector sensitivity / noise floor",
          "note": "Low K_d = high sensitivity; noise floor sets minimum detectable signal"
        },
        {
          "field_a_term": "quorum-sensing threshold concentration",
          "field_b_term": "decision threshold in hypothesis testing",
          "note": "Binary decision (respond/not respond) based on signal exceeding threshold"
        }
      ],
      "references": [
        {
          "note": "Karlson & Lüscher (1959) — pheromone concept",
          "doi": "10.1038/183055a0"
        },
        {
          "note": "Whittaker & Feeny (1971) — allelochemics and chemical ecology",
          "doi": "10.1126/science.171.3973.757"
        },
        {
          "note": "Keller & Surette (2006) — quorum sensing and microbial communication",
          "doi": "10.1038/nrmicro1511"
        },
        {
          "note": "Wyatt (2014) Pheromones and Animal Behaviour — comprehensive review"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/chemistry-ecology/b-chemical-ecology-signaling-networks.yaml"
    },
    {
      "id": "b-catalysis-reactor-design",
      "title": "The Langmuir-Hinshelwood mechanism — reactants adsorb on catalyst surfaces and react there, with rate determined by surface coverage isotherms — and the Sabatier volcano principle — optimal catalysts bind intermediates with intermediate affinity — provide the molecular-scale physical chemistry that underpins macroscale chemical reactor design equations (CSTR, PFR, Damköhler number), bridging surface science to industrial process engineering.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Heterogeneous catalysis — where reactants in gas or liquid phase react on a solid catalyst surface — is the foundation of the modern chemical industry (Haber-Bosch ammonia synthesis, Fischer-Tropsch, catalytic cracking, automotive catalytic converters). The physical chemistry of surface reactions de",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-dft-bep-relationship-enables-quantitative-catalyst-design-before-synthesis"
      ],
      "communication_gap": "Surface chemists (studying adsorption, binding energies, elementary steps) publish in Journal of Catalysis, ACS Catalysis, and Surface Science; chemical engineers designing reactors publish in Chemical Engineering Science, AIChE Journal, and Industrial & Engineering Chemistry Research. The curricula diverge: surface chemistry is a physical chemistry course, reactor design is a chemical engineering course. The quantitative connection — using DFT binding energies directly in reactor design equations via microkinetic models — has been established only in the last 20 years and is not yet standard in either community's textbooks.\n",
      "translation_table": [
        {
          "field_a_term": "Adsorption equilibrium constant K_A (surface chemistry)",
          "field_b_term": "Binding energy parameter in reactor rate expression",
          "note": "K_A = exp(-ΔG_ads/RT) — determined by DFT or temperature-programmed desorption"
        },
        {
          "field_a_term": "Surface coverage θ_A (Langmuir isotherm)",
          "field_b_term": "Concentration of active surface species in reactor model",
          "note": "Langmuir isotherm bridges gas-phase concentration to surface concentration"
        },
        {
          "field_a_term": "Sabatier principle (intermediate binding energy optimal)",
          "field_b_term": "Catalyst screening criterion for process development",
          "note": "Volcano plot from BEP relations allows rational prediction of best catalyst"
        },
        {
          "field_a_term": "Damköhler number Da = reaction/transport rate",
          "field_b_term": "Reactor operating regime: kinetic (Da<<1) vs. transport-limited (Da>>1)",
          "note": "Da determines whether a larger reactor or faster catalyst is needed"
        },
        {
          "field_a_term": "Weisz-Prater criterion Φ_WP (internal diffusion limitation)",
          "field_b_term": "Catalyst pellet design criterion: optimise particle size vs. surface area",
          "note": "Smaller pellets increase D_eff but increase pressure drop; optimum exists"
        },
        {
          "field_a_term": "Brønsted-Evans-Polanyi (BEP) relationship (activation energy vs. binding energy)",
          "field_b_term": "Quantitative prediction of catalyst turnover frequency from binding energy",
          "note": "BEP: ΔE_act = α·ΔE_bind + β — allows DFT screening of catalysts before synthesis"
        }
      ],
      "references": [
        {
          "doi": "10.1021/ja02242a004",
          "note": "Langmuir (1918) The adsorption of gases on plane surfaces of glass, mica and platinum, J Am Chem Soc 40:1361 — Langmuir isotherm and surface kinetics\n"
        },
        {
          "note": "Sabatier (1902) Comptes Rendus 134:514 — original statement of optimal intermediate binding strength for catalysis (Sabatier principle)\n"
        },
        {
          "note": "Levenspiel (1999) Chemical Reaction Engineering, 3rd ed., Wiley — standard text covering CSTR, PFR, Damköhler number, and Weisz-Prater criterion\n"
        },
        {
          "doi": "10.1006/jcat.2002.3543",
          "note": "Nørskov et al. (2002) Universality in heterogeneous catalysis, J Catal 209:275 — DFT-based BEP relationships and volcano plots for ammonia synthesis; rational prediction of Ru as optimal catalyst\n"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/chemistry-engineering/b-catalysis-reactor-design.yaml"
    },
    {
      "id": "b-corrosion-science-materials-protection",
      "title": "Electrochemical corrosion science (Evans diagrams, Pourbaix equilibria, passivation thermodynamics) provides the quantitative foundation for engineering corrosion protection strategies that collectively address ~3.4% of global GDP in losses annually.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Corrosion is electrochemical: a galvanic cell where the anode oxidises (Fe → Fe²⁺ + 2e⁻) and the cathode reduces (O₂ + 2H₂O + 4e⁻ → 4OH⁻). The Evans diagram (mixed potential theory) superimposes anodic and cathodic polarization curves to find the corrosion potential E_corr and corrosion current i_co",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ml-accelerated-corrosion-inhibitor-discovery"
      ],
      "communication_gap": "Academic electrochemistry is dominated by energy storage and conversion research (batteries, fuel cells); corrosion research is often viewed as applied and unglamorous, underfunded relative to its economic impact. Civil and mechanical engineers who specify materials often have no electrochemistry training. The standards literature (ASTM, ISO) that governs engineering practice rarely cites academic corrosion electrochemistry, creating a slow knowledge transfer pathway of 10–20 years from discovery to standard.\n",
      "translation_table": [
        {
          "field_a_term": "standard electrode potential E° (electrochemistry)",
          "field_b_term": "galvanic series rank (engineering)",
          "note": "Engineering galvanic series is an empirical approximation to thermodynamic E° in seawater"
        },
        {
          "field_a_term": "Pourbaix diagram (E vs. pH stability regions)",
          "field_b_term": "corrosion, passivation, and immunity zones for design decisions",
          "note": "Engineers use Pourbaix to select material + environment combinations avoiding corrosion regions"
        },
        {
          "field_a_term": "anodic Tafel slope βₐ",
          "field_b_term": "polarization resistance Rₚ = βₐβc/[2.3·i_corr·(βₐ+βc)]",
          "note": "Stern-Geary equation converts laboratory polarization curves to service corrosion rate"
        },
        {
          "field_a_term": "passivation current density i_pass",
          "field_b_term": "repassivation time after scratch (engineering specification)",
          "note": "Stainless steel specification requires repassivation in <1 s in service environments"
        },
        {
          "field_a_term": "double-layer capacitance C_dl",
          "field_b_term": "electrochemical impedance spectroscopy (EIS) circuit element",
          "note": "EIS allows in-situ coating degradation monitoring — used in infrastructure health monitoring"
        }
      ],
      "references": [
        {
          "note": "Evans (1960) The Corrosion and Oxidation of Metals, Arnold — mixed potential theory"
        },
        {
          "note": "Fontana (1986) Corrosion Engineering, 3rd ed., McGraw-Hill"
        },
        {
          "doi": "10.1002/maco.201608562",
          "note": "Koch et al. (2016) — Global costs of corrosion, Mater Performance Suppl., $2.5T estimate"
        },
        {
          "note": "Pourbaix (1966) Atlas of Electrochemical Equilibria in Aqueous Solutions, Pergamon"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/chemistry-engineering/b-corrosion-science-materials-protection.yaml"
    },
    {
      "id": "b-electrochemistry-battery-technology",
      "title": "Li-ion battery operation is governed by electrochemical thermodynamics (Nernst equation, Butler-Volmer kinetics) and solid-state physics (lithium chemical potential in intercalation compounds), with the solid electrolyte interphase (SEI) as a nano-engineered passivation layer whose chemistry determines cycle life, and solid-state batteries replacing liquid electrolytes with Li₇La₃Zr₂O₁₂ (LLZO) to eliminate dendrite failure modes.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Li-ion batteries are electrochemical engines whose performance reduces entirely to electrode thermodynamics and kinetics. Cathode half-reaction: Li₁₋ₓCoO₂ + xLi⁺ + xe⁻ ↔ LiCoO₂ (E°≈+4.1 V vs Li/Li⁺). Anode: C₆ + xLi⁺ + xe⁻ ↔ LiₓC₆ (E°≈+0.1 V vs Li/Li⁺). Cell voltage determined by lithium chemical po",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-llzo-single-ion-conductor-eliminates-dendrite-nucleation"
      ],
      "communication_gap": "Battery science sits at the intersection of solid-state physics, electrochemistry, and materials engineering — no single academic department owns it. Electrochemists rarely engage with the solid-state physics community developing LLZO, and materials engineers often lack deep electrochemical thermodynamics training. Industrial battery research is proprietary, hiding results from academic researchers. The field also splits between inorganic chemistry (cathode materials) and polymer chemistry (electrolytes), with limited cross-talk.\n",
      "translation_table": [
        {
          "field_a_term": "lithium chemical potential μ_Li in electrode",
          "field_b_term": "open-circuit voltage of battery cell",
          "note": "Voltage is a direct thermodynamic measurement of the Li chemical potential difference"
        },
        {
          "field_a_term": "Butler-Volmer exchange current density j₀",
          "field_b_term": "maximum charge/discharge rate (C-rate capability)",
          "note": "j₀ depends exponentially on activation energy; low j₀ limits fast charging"
        },
        {
          "field_a_term": "SEI ionic conductivity σ_Li",
          "field_b_term": "internal resistance increase and cycle life degradation",
          "note": "SEI grows as √t (diffusion-limited); thicker SEI = higher resistance"
        },
        {
          "field_a_term": "Li⁺ transference number t₊ in electrolyte",
          "field_b_term": "concentration polarization and power fade",
          "note": "In liquid electrolytes t₊ ≈ 0.4; single-ion conductors give t₊ = 1"
        },
        {
          "field_a_term": "dendrite nucleation criterion (tip electric field)",
          "field_b_term": "battery short-circuit failure mode",
          "note": "LLZO eliminates dendrites by mechanical shear modulus G > 8.5 GPa (Monroe-Newman)"
        }
      ],
      "references": [
        {
          "doi": "10.1021/cm901452z",
          "note": "Goodenough & Kim (2010) Challenges for rechargeable Li batteries. Chem Mater 22:587"
        },
        {
          "doi": "10.1038/35104644",
          "note": "Tarascon & Armand (2001) Issues and challenges facing rechargeable lithium batteries. Nature 414:359"
        },
        {
          "doi": "10.1149/1.2128859",
          "note": "Peled (1979) The electrochemical behavior of alkali and alkaline earth metals in nonaqueous battery systems. J Electrochem Soc 126:2047"
        },
        {
          "doi": "10.1038/nenergy.2016.141",
          "note": "Janek & Zeier (2016) A solid future for battery development. Nat Energy 1:16141"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/chemistry-engineering/b-electrochemistry-battery-technology.yaml"
    },
    {
      "id": "b-green-chemistry-atom-economy",
      "title": "Anastas-Warner's 12 Principles of Green Chemistry and Trost's atom economy metric (AE = MW(product)/ΣMW(all products) × 100%) provide a quantitative engineering framework for reaction design that bridges organic synthesis with industrial process efficiency and life cycle environmental impact assessment.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Green chemistry (Anastas & Warner 1998) recasts synthetic chemistry as an engineering optimization problem with environmental constraints. The 12 Principles define a design space: Atom Economy (Principle 2), catalysis (Principle 9), and renewable feedstocks (Principle 7) are quantitatively tractable",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-co2-feedstock-polycarbonate-cascade-net-carbon-neutral"
      ],
      "communication_gap": "Organic chemistry and chemical engineering have historically been separate academic departments with different journals (JACS, Angewandte for chemistry; AIChE J, Chem Eng Sci for engineering) and conferences. Green chemistry principles are taught in chemistry departments but E-factor and LCA are primarily used by process engineers in industry. The 12 Principles are qualitative enough to appeal to academic chemists but quantitative implementation (solvent selection tools, mass intensity calculations) requires engineering training. Industrial chemists computing E-factors rarely engage with the synthetic chemistry literature proposing fundamentally different routes.\n",
      "translation_table": [
        {
          "field_a_term": "atom economy AE (%) — fraction of reactant atoms in desired product",
          "field_b_term": "process yield efficiency in chemical engineering mass balance"
        },
        {
          "field_a_term": "E-factor (kg waste / kg product)",
          "field_b_term": "specific waste load in industrial process design"
        },
        {
          "field_a_term": "catalytic cycle (catalyst regenerated, not consumed)",
          "field_b_term": "closed-loop process with zero net reagent consumption"
        },
        {
          "field_a_term": "stoichiometric reagent (consumed in reaction)",
          "field_b_term": "open-loop process with linear material throughput"
        },
        {
          "field_a_term": "life cycle assessment (LCA) of synthetic route",
          "field_b_term": "full system boundary environmental engineering analysis"
        },
        {
          "field_a_term": "supercritical CO₂ (Tc = 31.1°C, Pc = 73.8 bar)",
          "field_b_term": "benign process solvent with tunable solvation properties"
        },
        {
          "field_a_term": "inherently safer chemistry (Principle 12)",
          "field_b_term": "intrinsic safety design in process engineering (Kletz 1978)"
        }
      ],
      "references": [
        {
          "note": "Anastas & Warner (1998) Green Chemistry: Theory and Practice — Oxford University Press (12 Principles)"
        },
        {
          "doi": "10.1126/science.254.5037.1471",
          "note": "Trost (1991) Science 254:1471 — the atom economy: a search for synthetic efficiency"
        },
        {
          "note": "Sheldon (1992) Chem Ind 23:903 — organic synthesis: past, present and future (E-factor introduced)"
        },
        {
          "doi": "10.1126/science.284.5421.1780",
          "note": "Leitner (1999) Science 284:1780 — supercritical carbon dioxide as a green reaction medium"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/chemistry-engineering/b-green-chemistry-atom-economy.yaml"
    },
    {
      "id": "b-membrane-fouling-dlvo-colloidal-deposition",
      "title": "Membrane fouling by colloidal particles is governed by DLVO theory from colloid chemistry, where the interplay of van der Waals attraction and electrostatic double-layer repulsion determines whether particles deposit on membrane surfaces and cause flux decline.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "DLVO theory (Derjaguin-Landau-Verwey-Overbeek) predicts colloid stability via the total interaction energy V_T = V_vdW + V_EDL, where van der Waals attraction V_vdW ≈ -A_H·a/(6h) (A_H = Hamaker constant, a = particle radius, h = separation) and electrostatic repulsion V_EDL ≈ 64πεa(kT/ze)²·tanh²(zeψ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-dlvo-extension-non-dlvo-forces-membrane-fouling"
      ],
      "communication_gap": "Membrane engineers focus on flux, pressure, and cleaning protocols while colloid chemists focus on suspension stability and coagulation kinetics; DLVO theory from the 1940s underpins both but cross-disciplinary collaboration remains limited, particularly for complex natural organic matter systems where extended DLVO is needed.\n",
      "translation_table": [
        {
          "field_a_term": "membrane fouling layer (membrane science)",
          "field_b_term": "colloidal aggregate / sediment (colloid chemistry)",
          "note": "Both result from particle deposition when repulsive energy barrier is overcome"
        },
        {
          "field_a_term": "critical flux (membrane science)",
          "field_b_term": "critical coagulation concentration (colloid chemistry)",
          "note": "Both represent the threshold beyond which irreversible deposition dominates"
        },
        {
          "field_a_term": "transmembrane pressure (membrane science)",
          "field_b_term": "applied force overcoming energy barrier (colloid chemistry)",
          "note": "TMP drives particles through the DLVO energy barrier to deposit on membrane surface"
        },
        {
          "field_a_term": "cake resistance Rc (membrane science)",
          "field_b_term": "aggregate packing structure / fractal dimension (colloid chemistry)",
          "note": "Cake layer compressibility is governed by inter-particle DLVO forces in the deposit"
        }
      ],
      "references": [
        {
          "doi": "10.1021/es00098a012",
          "note": "Elimelech & O'Melia (1990) - kinetics of deposition of colloidal particles in porous media"
        },
        {
          "doi": "10.1016/S0376-7388(97)00109-X",
          "note": "Bowen & Jenner (1995) - theoretical descriptions of membrane filtration of colloids"
        },
        {
          "doi": "10.1016/j.memsci.2006.02.030",
          "note": "Brant & Childress (2004) - assessing short-range membrane-colloid interactions using surface energetics"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chemistry-engineering/b-membrane-fouling-dlvo-colloidal-deposition.yaml"
    },
    {
      "id": "b-nuclear-chemistry-reactor-physics",
      "title": "Nuclear reactor physics bridges chemistry and engineering: the six-factor formula (k = ╬╖fp╬╡P_NL) governs criticality from fission cross-sections, the thorium cycle offers proliferation-resistant breeding, and Generation IV reactor designs (MSR, GFR) pursue passive safety through thermodynamic and neutronics principles.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Nuclear fission: ┬▓┬│Γü╡U + n ΓåÆ fission products + 2-3 prompt neutrons + ~200 MeV total energy (~170 MeV kinetic energy of fission fragments + 20 MeV from delayed gamma and beta). The criticality condition is governed by the six-factor formula: k_eff = ╬╖┬╖f┬╖p┬╖╬╡┬╖P_TNL┬╖P_FNL where ╬╖ = neutron",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-thorium-msr-achieves-baseload-carbon-free-power-lower-waste"
      ],
      "communication_gap": "Nuclear chemists (who understand isotope production, fuel chemistry, reprocessing) and nuclear engineers (who design reactor systems, safety systems, thermodynamics) work in overlapping but distinct communities. Policy makers who set nuclear energy strategy rarely have deep understanding of nuclear physics or chemistry. The public discourse conflates reactor-grade plutonium (weapons-unsuitable) with weapons-grade ΓÇö a chemistry distinction with profound policy implications.\n",
      "translation_table": [
        {
          "field_a_term": "fission cross-section ╧â_f (barns)",
          "field_b_term": "reaction probability for neutron-nucleus interaction (quantum mechanical)",
          "note": "1 barn = 10Γü╗┬▓Γü┤ cm┬▓; thermal ┬▓┬│Γü╡U cross-section 583 b >> fast spectrum 1 b ΓÇö moderator essential"
        },
        {
          "field_a_term": "multiplication factor k_eff",
          "field_b_term": "neutron balance per generation; k=1 ΓåÆ steady state; k>1 ΓåÆ exponential growth",
          "note": "k_eff relates the entire nuclear chain reaction; equals product of six factors in thermal reactors"
        },
        {
          "field_a_term": "neutron moderation (elastic scattering, energy transfer)",
          "field_b_term": "slowing neutrons from MeV to meV scale via billiard-ball collisions",
          "note": "maximum energy transfer when mass equal; H (mass 1) is most efficient; graphite (mass 12) less so"
        },
        {
          "field_a_term": "thorium-233U cycle (┬▓┬│┬▓Th ΓåÆ ┬▓┬│┬│U)",
          "field_b_term": "breeding cycle using fertile ┬▓┬│┬▓Th to produce fissile ┬▓┬│┬│U",
          "note": "┬▓┬│┬│U has high thermal ╬╖ (> ┬▓┬│Γü╡U), low transuranic production, and no weapons-grade plutonium pathway"
        },
        {
          "field_a_term": "negative temperature coefficient (╬▒_T < 0)",
          "field_b_term": "self-regulating feedback ΓÇö as T increases, k_eff decreases ΓåÆ power falls",
          "note": "Doppler broadening of ┬▓┬│Γü╕U resonances at higher T increases resonance capture ΓåÆ negative void coefficient"
        }
      ],
      "references": [
        {
          "note": "Lamarsh & Baratta (2001) Introduction to Nuclear Engineering, 3rd ed.; Prentice Hall"
        },
        {
          "note": "Fermi (1952) Experimental production of a divergent chain reaction; Nobel Lecture"
        },
        {
          "note": "Rubbia et al. (1995) Conceptual design of a fast neutron operated high power energy amplifier; CERN AT/95-44"
        },
        {
          "note": "World Nuclear Association (2023) Nuclear Power Reactors; worldnuclear.org"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chemistry-engineering/b-nuclear-chemistry-reactor-physics.yaml"
    },
    {
      "id": "b-pem-hydrogen-economy",
      "title": "Proton exchange membranes (Nafion) enable both PEM electrolysers and PEM fuel cells via proton-selective transport — bridging polymer chemistry to electrochemical engineering to the hydrogen economy, with Faradaic efficiency determined by membrane selectivity and conductivity.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Proton exchange membranes (PEM) — primarily Nafion, a perfluorosulfonated ionomer — are the enabling materials technology for the hydrogen energy cycle. The same membrane enables two complementary devices:\n1. PEM ELECTROLYSER (water splitting):\n   Anode: H₂O → ½O₂ + 2H⁺ + 2e⁻   (OER, ΔE° = +1.23 V)\n",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-pem-membrane-beyond-nafion-high-temperature"
      ],
      "communication_gap": "Polymer chemists designing membranes rarely engage with grid-scale energy economics; energy economists rarely understand the materials science constraints on electrolysis efficiency and membrane lifetime. The pathway from sulfonation chemistry to levelised cost of hydrogen is a multi-domain chain that requires simultaneously optimising polymer structure, device engineering, and energy systems economics — but each community publishes in separate journals with minimal overlap.\n",
      "translation_table": [
        {
          "field_a_term": "perfluorosulfonated ionomer (Nafion)",
          "field_b_term": "proton exchange membrane in electrolyser/fuel cell",
          "note": "The same polymer serves as both the PEM electrolyser and fuel cell membrane"
        },
        {
          "field_a_term": "Grotthuss proton hopping mechanism",
          "field_b_term": "membrane ionic conductivity σ (S/cm)",
          "note": "Protons hop between -SO₃H sites via water molecules; conductivity requires >20% hydration"
        },
        {
          "field_a_term": "ion exchange capacity (IEC, meq/g)",
          "field_b_term": "sulfonation degree, proton conductivity, and swelling",
          "note": "Higher IEC increases σ but also water uptake and dimensional instability"
        },
        {
          "field_a_term": "Faradaic efficiency ε_F",
          "field_b_term": "fraction of current that produces H₂ (vs. parasitic crossover)",
          "note": "ε_F = 1 - (crossover current / total current); membrane selectivity directly controls ε_F"
        },
        {
          "field_a_term": "membrane electrode assembly (MEA)",
          "field_b_term": "heart of the PEM device stack",
          "note": "The MEA (membrane + catalyst layers + gas diffusion layers) determines all device performance metrics"
        }
      ],
      "references": [
        {
          "doi": "10.1016/S0376-7388(01)00376-6",
          "note": "Kreuer, K.D. (2001). On the development of proton conducting polymer membranes for hydrogen and methanol fuel cells. J. Membr. Sci. 185:29–39."
        },
        {
          "doi": "10.1021/cr0207123",
          "note": "Mauritz, K.A. & Moore, R.B. (2004). State of understanding of Nafion. Chem. Rev. 104:4535–4585."
        },
        {
          "doi": "10.1038/nature11115",
          "note": "Debe, M.K. (2012). Electrocatalyst approaches and challenges for automotive fuel cells. Nature 486:43–51."
        },
        {
          "doi": "10.1126/science.aad4998",
          "note": "Seh, Z.W. et al. (2017). Combining theory and experiment in electrocatalysis: Insights into materials design. Science 355:eaad4998."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/chemistry-engineering/b-pem-hydrogen-economy.yaml"
    },
    {
      "id": "b-polymer-processing-manufacturing",
      "title": "Polymer Processing and Materials Manufacturing — reptation dynamics, WLF equation, electrospinning, and FDM additive manufacturing connect polymer physics to industrial production",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Polymers are viscoelastic materials exhibiting both viscous (flow) and elastic (recovery) behaviour depending on timescale relative to the relaxation time τ_R. The Maxwell model (spring + dashpot in series) captures stress relaxation: σ(t) = σ₀ e^{−t/τ_R}. The Kelvin–Voigt model (parallel) captures ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Polymer physics (de Gennes, Doi, Edwards) is a theoretical condensed matter field; polymer processing (extrusion, injection moulding) is an industrial chemical engineering practice; electrospinning and additive manufacturing are engineering application fields. These communities use different models of polymer behaviour — reptation theory is well known to polymer physicists but rarely used explicitly in industrial processing simulation, where empirical constitutive equations dominate. The additive manufacturing community is only beginning to incorporate quantitative reptation theory into process models.\n",
      "translation_table": [
        {
          "field_a_term": "reptation time tau_rep (chain escape from tube)",
          "field_b_term": "dominant viscoelastic relaxation timescale of entangled melt",
          "note": "tau_rep ~ M^3 (theory) / M^3.4 (experiment); sets minimum time for full stress relaxation in melt processing"
        },
        {
          "field_a_term": "WLF equation (shift factor a_T)",
          "field_b_term": "time-temperature superposition for polymer processing window design",
          "note": "WLF enables construction of master curve from measurements at different temperatures — guides selection of processing temperatures"
        },
        {
          "field_a_term": "Taylor cone formation at critical E field",
          "field_b_term": "electrospinning onset — electrostatic pressure overcomes surface tension",
          "note": "Taylor (1964): cone half-angle 49.3 degrees; Reneker showed that solution concentration controls fiber diameter"
        },
        {
          "field_a_term": "bending instability in electrospun jet",
          "field_b_term": "nanofiber formation via chaotic whipping motion",
          "note": "Whipping instability provides the extreme elongation (1000x draw ratio) that produces nanoscale fiber diameters"
        },
        {
          "field_a_term": "FDM weld line polymer interdiffusion",
          "field_b_term": "inter-layer bond strength in 3D printed parts",
          "note": "Bond strength reaches bulk value only after interdiffusion depth exceeds entanglement mesh size ~ R_e/sqrt(N); controlled by T and dwell time"
        },
        {
          "field_a_term": "glass transition T_g (Flory-Fox equation)",
          "field_b_term": "processing temperature threshold separating solid and viscous flow regimes",
          "note": "Flory-Fox: T_g = T_g,inf - K/M_n — T_g increases with molecular weight, plateauing above entanglement threshold"
        }
      ],
      "references": [
        {
          "doi": "10.1021/ja01018a023",
          "note": "Williams, Landel & Ferry (1955) J Am Chem Soc 77:3701 — WLF equation"
        },
        {
          "doi": "10.1098/rspa.1964.0090",
          "note": "Taylor (1964) Proc R Soc A 280:383 — disintegration of water droplets in electric field"
        },
        {
          "doi": "10.1088/0957-4484/7/3/009",
          "note": "Reneker & Chun (1996) Nanotechnology 7:216 — electrospinning of nanofibers"
        },
        {
          "doi": "10.1007/s11465-013-0248-8",
          "note": "Guo & Leu (2013) Front Mech Eng 8:215 — additive manufacturing review"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/chemistry-engineering/b-polymer-processing-manufacturing.yaml"
    },
    {
      "id": "b-chemical-garden-osmotic-precipitation",
      "title": "Chemical gardens — silicate structures that spontaneously grow when metal salts dissolve in sodium silicate solution — are self-organized precipitation systems driven by osmotic pressure across a semipermeable membrane, obeying the same fluid mechanics (Darcy's law, buoyancy-driven flow) and precipitation chemistry (ion product vs. K_sp) that govern hydrothermal vent chimneys and some biomineralization processes",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A chemical garden forms when a metal salt crystal dissolves, creating an osmotic pressure gradient Pi = RT * delta_C / V_m across a colloidal silicate membrane; fluid is driven inward by osmosis (J = L_p * (Pi - delta_P)), building pressure until the membrane ruptures and a buoyancy-driven upwelling",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-chemical-garden-osmotic-pressure-tube-morphology"
      ],
      "communication_gap": "Chemists study chemical gardens as a qualitative demonstration of self-organization while fluid mechanicists study osmotic membranes and precipitation in industrial and geological contexts; a unified quantitative treatment connecting osmotic, fluid-dynamic, and precipitation parameters was achieved only recently by the Maini-Stone group.",
      "translation_table": [
        {
          "field_a_term": "osmotic pressure Pi across silicate membrane (chemistry)",
          "field_b_term": "pressure gradient driving Darcy flow in porous medium (fluid mechanics)",
          "note": "Osmotic pressure acts as a driving force for inward fluid flow exactly as in Darcy's law"
        },
        {
          "field_a_term": "precipitation of metal silicate / hydroxide at membrane (chemistry)",
          "field_b_term": "mineral precipitation when ion product exceeds K_sp in mixing zone (chemistry/fluid mechanics)",
          "note": "Same thermodynamic criterion governs precipitation in chemical gardens and hydrothermal vents"
        },
        {
          "field_a_term": "buoyancy-driven upwelling inside chemical garden tube (fluid mechanics)",
          "field_b_term": "thermally stratified convective plume in Rayleigh-Benard flow (fluid mechanics)",
          "note": "Density difference between interior fluid and ambient drives the same buoyancy convection"
        },
        {
          "field_a_term": "tube morphology (wide slow-growth vs. filamentous fast-growth) (chemistry)",
          "field_b_term": "laminar vs. turbulent plume regime at different Rayleigh numbers (fluid mechanics)",
          "note": "Tube morphology phase diagram maps onto a Rayleigh-number/Peclet-number diagram for the upwelling jet"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.aaa7154",
          "note": "Barge et al. (2015) - from chemical gardens to chemobrionics (systematic chemical garden physics review)"
        },
        {
          "doi": "10.1039/C4SM01 462F",
          "note": "Haudin et al. (2014) - spiral precipitation patterns in confined chemical gardens"
        },
        {
          "doi": "10.1039/b110741b",
          "note": "Cartwright et al. (2002) - chemical gardens revisited: self-organisation in precipitation"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chemistry-fluid-mechanics/b-chemical-garden-osmotic-precipitation.yaml"
    },
    {
      "id": "b-vae-x-catalyst-latent-space-screening",
      "title": "Variational autoencoders bridge probabilistic latent-variable learning and catalyst latent-space screening for materials discovery.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy (to be empirically validated): VAE latent manifolds can compress catalyst structural descriptors into smooth generative coordinates that support guided exploration of activity-selectivity tradeoffs.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-vae-latent-regularization-improves-catalyst-hit-rate"
      ],
      "communication_gap": "Generative-model metrics do not directly align with synthesis feasibility and catalytic stability constraints.",
      "translation_table": [
        {
          "field_a_term": "latent variable z",
          "field_b_term": "compressed catalyst descriptor state",
          "note": "Latent coordinates summarize high-dimensional structural features."
        },
        {
          "field_a_term": "decoder likelihood",
          "field_b_term": "candidate structure generation",
          "note": "Generative decoding proposes chemically plausible candidates."
        },
        {
          "field_a_term": "KL regularization",
          "field_b_term": "search-space smoothness control",
          "note": "Regularization helps avoid fragmented latent neighborhoods."
        }
      ],
      "references": [
        {
          "arxiv": "1312.6114",
          "note": "Auto-Encoding Variational Bayes."
        },
        {
          "url": "https://www.energy.gov/eere/fuelcells/catalyst-research",
          "note": "Catalyst discovery program context."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/chemistry-machine-learning/b-vae-x-catalyst-latent-space-screening.yaml"
    },
    {
      "id": "b-graph-theory-x-molecular-structure",
      "title": "Graph theory ↔ Molecular structure — topological indices as chemical descriptors",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Chemical structure-property relationships are encoded by graph-theoretic topological indices (Wiener index, Randić connectivity, Zagreb indices); the Wiener index (sum of all pairwise graph distances) correlates with boiling point across homologous series with r² > 0.99, making graph theory a predic",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-graph-theory-x-molecular-structure"
      ],
      "communication_gap": "Chemical graph theory (H. Wiener 1947, M. Randić 1975) was developed by physical chemists to predict bulk properties without quantum chemistry. Combinatorial mathematicians studying graph invariants rarely collaborated with chemical property prediction researchers, despite sharing identical mathematical objects. The synthesis emerged primarily in the journal Journal of Mathematical Chemistry (founded 1987), but mainstream organic chemistry textbooks still rarely mention topological indices.",
      "translation_table": [
        {
          "field_a_term": "molecular graph (atoms = vertices, bonds = edges)",
          "field_b_term": "undirected weighted graph in graph theory",
          "note": "Bond orders can weight edges; hydrogen-suppressed graphs are standard"
        },
        {
          "field_a_term": "Wiener index W = Σ d(u,v) (sum of all pairwise distances)",
          "field_b_term": "Wiener polarity of a graph (graph-theoretic distance sum)",
          "note": "W predicts boiling point in alkanes with r² > 0.99 for n-alkane homologous series"
        },
        {
          "field_a_term": "Randić connectivity index χ = Σ (dᵢdⱼ)^(-1/2) over edges",
          "field_b_term": "edge-weighted graph functional related to degree sequence",
          "note": "χ correlates with physicochemical properties for broad molecular classes"
        },
        {
          "field_a_term": "Zagreb index M₁ = Σ dᵢ² (sum of squared degrees)",
          "field_b_term": "graph energy functional (sum of squared vertex degrees)",
          "note": "M₁ and M₂ appear in topological descriptors for QSPR models"
        }
      ],
      "references": [
        {
          "doi": "10.1021/ja01193a005",
          "note": "Wiener (1947) — structural determination of paraffin boiling points; J Am Chem Soc 69:17"
        },
        {
          "doi": "10.1021/am50023a600",
          "note": "Randić (1975) — characterization of molecular branching; J Am Chem Soc 97:6609"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chemistry-math/b-graph-theory-x-molecular-structure.yaml"
    },
    {
      "id": "b-graph-theory-molecular-properties",
      "title": "Molecular graphs (atoms as vertices, bonds as edges) encode chemical structure through topological indices correlated with physical properties, and the characteristic polynomial of the adjacency matrix yields Hückel MO energies — bridging graph spectral theory to computational chemistry.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A molecule is represented as a graph G = (V, E) where vertices are heavy atoms and edges are chemical bonds. Three bridges: (1) Topological indices — the Wiener index W = Σ_{i<j} d(i,j) (sum of all pairwise distances) correlates with boiling point of alkanes (r² > 0.97, Wiener 1947); Zagreb indices ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-topological-index-drug-bioavailability-prediction"
      ],
      "communication_gap": "Wiener (1947) was a chemist; Hückel (1931) was a physicist-chemist — their graph results were not connected to formal graph theory for decades. The mathematical theory of chemical graph theory (Trinajstić, Gutman, Randić) developed in the 1970s–1990s in chemistry journals, unaware of parallel developments in algebraic graph theory. The NP-hardness connection (graph isomorphism) was only articulated by theoretical computer scientists. GNNs for molecular property prediction rediscover topological indices as trainable features without citing the classical chemical graph theory literature.\n",
      "translation_table": [
        {
          "field_a_term": "graph adjacency matrix A_{ij}",
          "field_b_term": "molecular connectivity matrix (A_{ij}=1 if atoms i,j bonded)"
        },
        {
          "field_a_term": "eigenvalues of A (graph spectrum)",
          "field_b_term": "Hückel MO energy levels ε_k = α + λ_k β"
        },
        {
          "field_a_term": "characteristic polynomial det(xI − A)",
          "field_b_term": "secular determinant in Hückel MO theory"
        },
        {
          "field_a_term": "Wiener index W = Σ d(i,j)",
          "field_b_term": "topological descriptor correlating with boiling point and molar volume"
        },
        {
          "field_a_term": "graph isomorphism problem",
          "field_b_term": "molecular equivalence / stereoisomer identification"
        },
        {
          "field_a_term": "Randić connectivity index χ = Σ_{uv} (deg(u)·deg(v))^{-1/2}",
          "field_b_term": "branching index correlating with octanol-water partition coefficient"
        },
        {
          "field_a_term": "graph planarity (Kuratowski's theorem)",
          "field_b_term": "planarity condition required for classical Hückel theory"
        }
      ],
      "references": [
        {
          "doi": "10.1021/ja01193a005",
          "note": "Wiener (1947) Structural determination of paraffin boiling points. J Am Chem Soc 69:17"
        },
        {
          "note": "Hückel (1931) Quantentheoretische Beiträge zum Benzolproblem. Z Phys 70:204"
        },
        {
          "note": "Trinajstić (1992) Chemical Graph Theory, 2nd ed. CRC Press"
        },
        {
          "doi": "10.1021/ja00856a001",
          "note": "Randić (1975) On characterization of molecular branching. J Am Chem Soc 97:6609"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/chemistry-mathematics/b-graph-theory-molecular-properties.yaml"
    },
    {
      "id": "b-molecular-dynamics-statistical-sampling",
      "title": "Molecular dynamics is applied Hamiltonian mechanics — symplectic integrators, free energy perturbation, and metadynamics connect statistical mechanics theory to computational drug discovery",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Molecular dynamics (MD) numerically integrates Hamilton's equations for N-atom systems. The Verlet algorithm r(t+Δt) = 2r(t) - r(t-Δt) + F(t)Δt²/m is a second-order symplectic integrator: it preserves the symplectic structure of phase space (Liouville's theorem) and conserves a shadow Hamiltonian — ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-metadynamics-collective-variables-protein-allostery"
      ],
      "communication_gap": "Computational chemists using MD publish in Journal of Chemical Theory and Computation, Journal of Physical Chemistry, and Journal of Chemical Information and Modeling; applied mathematicians studying symplectic integration publish in SIAM Journal on Scientific Computing and Foundations of Computational Mathematics. The mathematical foundations of enhanced sampling methods are rarely discussed in chemistry-focused MD papers, and mathematical integration theory rarely addresses force-field accuracy limitations.\n",
      "translation_table": [
        {
          "field_a_term": "symplectic integrator / Verlet algorithm (mathematics)",
          "field_b_term": "stable long-time MD simulation (computational chemistry)",
          "note": "Symplectic integrators conserve a modified Hamiltonian — preventing energy drift that would corrupt thermodynamics"
        },
        {
          "field_a_term": "free energy perturbation / Zwanzig formula (statistical mechanics)",
          "field_b_term": "binding affinity ΔG prediction (drug discovery)",
          "note": "FEP is the gold standard for computing relative binding free energies — directly used in lead optimization"
        },
        {
          "field_a_term": "metadynamics bias potential (adaptive importance sampling)",
          "field_b_term": "conformational sampling / protein folding pathway (chemistry)",
          "note": "The time-integrated bias estimates the free energy surface — revealing folding intermediates"
        },
        {
          "field_a_term": "weighted histogram analysis (WHAM, statistical estimation)",
          "field_b_term": "potential of mean force along reaction coordinate (chemistry)",
          "note": "WHAM combines multiple biased simulations into a statistically optimal free energy profile"
        },
        {
          "field_a_term": "Liouville's theorem (Hamiltonian mechanics)",
          "field_b_term": "phase space volume conservation (statistical mechanics)",
          "note": "Phase space incompressibility ensures ergodicity and validity of statistical mechanical averages"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRev.159.98",
          "note": "Verlet (1967). Computer 'experiments' on classical fluids. I. Thermodynamical properties of Lennard-Jones molecules. Phys Rev 159:98."
        },
        {
          "doi": "10.1063/1.1740359",
          "note": "Zwanzig (1954). High-temperature equation of state by a perturbation method. I. Nonpolar gases. J Chem Phys 22:1420."
        },
        {
          "doi": "10.1073/pnas.202427399",
          "note": "Laio & Parrinello (2002). Escaping free-energy minima. PNAS 99:12562."
        },
        {
          "doi": "10.1002/jcc.540130812",
          "note": "Kumar et al. (1992). The weighted histogram analysis method for free-energy calculations on biomolecules. J Comput Chem 13:1011."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/chemistry-mathematics/b-molecular-dynamics-statistical-sampling.yaml"
    },
    {
      "id": "b-molecular-dynamics-x-stochastic-thermostats",
      "title": "Molecular dynamics thermostats (Nosé–Hoover, Langevin, Andersen) are designed as stochastic or extended deterministic dynamics whose invariant distributions approximate the canonical ensemble, bridging chemistry simulations to stochastic differential equations.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Microcanonical MD conserves energy; coupling to heat baths requires additional degrees of freedom or stochastic kicks. Langevin dynamics adds friction and Gaussian noise, formally an SDE whose underdamped/overdamped limits trade accuracy for sampling efficiency. Nosé–Hoover deterministically extends",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-nose-hoover-chains-match-target-kinetic-spectra-when-tuned"
      ],
      "communication_gap": "Chemistry software defaults hide thermostat choice; mathematics SDE literature rarely names popular MD packages, so timestep pathology reports are fragmented.",
      "translation_table": [
        {
          "field_a_term": "Langevin friction γ",
          "field_b_term": "coupling strength to implicit solvent / thermostat"
        },
        {
          "field_a_term": "Wiener process term",
          "field_b_term": "stochastic collision kicks in Andersen thermostat"
        },
        {
          "field_a_term": "Fokker–Planck stationary solution",
          "field_b_term": "target Maxwell–Boltzmann velocity distribution"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevA.31.1695",
          "note": "Hoover (1985) — canonical dynamics via Nosé–Hoover thermostat"
        },
        {
          "doi": "10.1080/00268978400101201",
          "note": "Nosé (1984) — unified formulation of deterministic constant temperature MD"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chemistry-mathematics/b-molecular-dynamics-x-stochastic-thermostats.yaml"
    },
    {
      "id": "b-molecular-spectroscopy-x-matrix-diagonalization",
      "title": "Vibrational spectroscopy of polyatomic molecules reduces to eigenvalue problems — mass-weighted Hessian matrices yield normal-mode frequencies (harmonic approximation), while quantum electronic states diagonalize molecular Hamiltonians in chosen bases — making linear algebra (orthogonal transformations, matrix spectra) the shared engine behind IR/Raman selection rules and computational chemistry routines.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "In the harmonic approximation, nuclear vibrations satisfy generalized eigenvalue equations involving mass-weighted second derivatives of potential energy — frequencies ω_i are square roots of eigenvalues after diagonalization. Electronic absorption/emission lines correspond to differences between ei",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-molecular-spectroscopy-x-matrix-diagonalization"
      ],
      "communication_gap": "Undergraduate chemistry spectroscopy labs emphasize empirical peak assignments while mathematics curricula teach diagonalization abstractly with less molecular Hamiltonian motivation.\n",
      "translation_table": [
        {
          "field_a_term": "Normal-mode coordinates Q_i",
          "field_b_term": "Eigenvectors of mass-weighted Hessian matrix",
          "note": "Decouples coupled Cartesian motions into independent harmonic oscillators."
        },
        {
          "field_a_term": "Fundamental IR frequencies ν_i",
          "field_b_term": "√(λ_i)/(2π) from eigenvalues λ_i of dynamical matrix",
          "note": "Spectral decomposition yields predicted peak positions used to validate DFT harmonic frequencies."
        },
        {
          "field_a_term": "Molecular orbital energies ε_j (computational chemistry)",
          "field_b_term": "Eigenvalues of Fock operator matrix in atomic orbital basis",
          "note": "Hartree–Fock and beyond rest on repeated diagonalization plus self-consistency."
        }
      ],
      "references": [
        {
          "doi": "10.1063/1.1740823",
          "note": "Wilson, Decius & Cross tradition — molecular vibrations via FG matrix methods (classic)"
        },
        {
          "doi": "10.1063/1.442713",
          "note": "Pople et al. Gaussian lineage — representative molecular orbital matrix formulation era paper"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chemistry-mathematics/b-molecular-spectroscopy-x-matrix-diagonalization.yaml"
    },
    {
      "id": "b-reaction-diffusion-excitable-media-bz",
      "title": "The Belousov-Zhabotinsky reaction is the paradigmatic chemical excitable medium: the Oregonator model reduces it to a two-variable activator-inhibitor reaction- diffusion system whose spiral waves, scroll waves, and Turing patterns are mathematically identical to cardiac arrhythmias, neural firing propagation, and developmental morphogenesis patterns.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "An excitable medium is a spatially distributed system with three states: resting (stable), excited (autocatalytic), and refractory (recovery). The Oregonator equations for the BZ reaction — d_u/dt = (1/epsilon)(u - u^2 - fv(u-q)/(u+q)) + D_u nabla^2 u; d_v/dt = u - v + D_v nabla^2 v — are isomorphic",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Chemists who study BZ kinetics rarely engage with the generic excitable medium literature in mathematical biology and physics; mathematical modellers use simplified two-variable models that lose quantitative connection to reaction mechanism. Cross-disciplinary validation of Oregonator predictions against BZ experiments is sparse.\n",
      "translation_table": [
        {
          "field_a_term": "activator-inhibitor (u, v) (mathematics / nonlinear dynamics)",
          "field_b_term": "HBrO2 autocatalyst and Ce^4+ / Br^- inhibitor in BZ reaction (chemistry)",
          "note": "The two-variable Oregonator maps exactly to the activator-inhibitor class"
        },
        {
          "field_a_term": "Turing instability (mathematics)",
          "field_b_term": "stationary concentration patterns in BZ with equal diffusion rates (chemistry)",
          "note": "Turing patterns observed in BZ-AOT microemulsion systems (Vanag & Epstein 2001)"
        },
        {
          "field_a_term": "spiral wave (mathematics)",
          "field_b_term": "rotating chemical wave in BZ dish (chemistry)",
          "note": "Identical topology; BZ spirals used to calibrate generic spiral wave theory"
        },
        {
          "field_a_term": "excitability threshold (mathematics)",
          "field_b_term": "minimum perturbation to trigger BZ wave front (chemistry)",
          "note": "Sub-threshold perturbations decay; super-threshold ones propagate - same in neurons"
        }
      ],
      "references": [
        {
          "doi": "10.1039/ft9959104433",
          "note": "Epstein & Pojman (1998) - An Introduction to Nonlinear Chemical Dynamics; BZ Oregonator"
        },
        {
          "doi": "10.1126/science.1257954",
          "note": "Vanag & Epstein (2001) - pattern formation in a tunable medium; BZ-AOT Turing patterns"
        },
        {
          "doi": "10.1103/PhysRevLett.72.2above",
          "note": "Winfree (1994) - electrical turbulence in 3D heart muscle; BZ analogy"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chemistry-mathematics/b-reaction-diffusion-excitable-media-bz.yaml"
    },
    {
      "id": "b-reaction-diffusion-pattern-formation",
      "title": "Turing's reaction-diffusion instability shows that two reacting chemicals with different diffusion rates can spontaneously break spatial symmetry, generating the periodic patterns seen in animal coat markings, limb development, and arid vegetation bands.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Turing instability (1952) in a two-component reaction-diffusion system: activator u with slow diffusion D_u and inhibitor v with fast diffusion D_v. The homogeneous steady state is stable without diffusion (f_u + g_v < 0) but becomes unstable with diffusion when D_v g_v + D_u f_u − 2√(D_u D_v(f_",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-turing-instability-aerosol-nucleation"
      ],
      "communication_gap": "Turing's 1952 paper appeared in a mathematics journal and was ignored by biologists for two decades. Developmental biology and ecology now use reaction-diffusion models, but atmospheric chemistry has not yet adopted Turing analysis to interpret particle formation burst events, despite the mathematical equivalence.\n",
      "translation_table": [
        {
          "field_a_term": "activator species u (slow diffusion)",
          "field_b_term": "morphogen / vegetation / resource",
          "note": "Short-range activation — promotes own production and that of the inhibitor"
        },
        {
          "field_a_term": "inhibitor species v (fast diffusion)",
          "field_b_term": "inhibitory morphogen / grazing / depleted resource",
          "note": "Long-range inhibition — suppresses activator at distance"
        },
        {
          "field_a_term": "critical wavenumber k_c",
          "field_b_term": "pattern spatial frequency (inverse wavelength)",
          "note": "Set by the ratio of diffusion constants and reaction rates"
        },
        {
          "field_a_term": "Turing instability condition",
          "field_b_term": "pattern-formation threshold",
          "note": "Requires D_v/D_u >> 1; biological systems tune this ratio developmentally"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rstb.1952.0012",
          "note": "Turing, A.M. (1952). The chemical basis of morphogenesis. Phil Trans R Soc B 237:37."
        },
        {
          "doi": "10.1007/BF00289234",
          "note": "Gierer, A. & Meinhardt, H. (1972). A theory of biological pattern formation. Kybernetik 12:30."
        },
        {
          "note": "Murray, J.D. (2003). Mathematical Biology II: Spatial Models and Biomedical Applications. Springer."
        },
        {
          "doi": "10.1038/376765a0",
          "note": "Kondo, S. & Asai, R. (1995). A reaction-diffusion wave on the skin of the marine angelfish Pomacanthus. Nature 376:765."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/chemistry-mathematics/b-reaction-diffusion-pattern-formation.yaml"
    },
    {
      "id": "b-reaction-network-graph-theory",
      "title": "Chemical reaction networks are directed hypergraphs whose steady-state multiplicity and oscillatory behavior are entirely determined by the network topology via the Feinberg-Horn-Jackson deficiency theory — making graph-theoretic invariants (deficiency number, linkage classes, strong linkage) the decisive predictors of chemical dynamics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A chemical reaction network (CRN) is a directed graph whose nodes are \"complexes\" (multisets of species, e.g. A + 2B) and edges are reactions. The Feinberg-Horn-Jackson (FHJ) deficiency theory (1972) provides a complete classification of which network topologies can admit multiple steady states, osc",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-crn-oscillator-design"
      ],
      "communication_gap": "Feinberg (1972) and Horn & Jackson (1972) published their deficiency theory in applied mathematics journals (Arch Rational Mech Anal) not read by chemists or biologists. The theory requires graph theory and linear algebra notation unfamiliar to most chemists. Chemical reaction network theory (CRNT) remained a speciality of mathematical chemistry for 30 years until the systems biology era (2000-present) began connecting it to biological networks. Most synthetic biologists who design genetic circuits remain unaware that the circuit topology alone determines whether bistability is achievable.\n",
      "translation_table": [
        {
          "field_a_term": "Complex (multiset of species, e.g. A + 2B)",
          "field_b_term": "Node in the reaction graph",
          "note": "Distinct complexes = distinct nodes; same species appearing in different reactions create different nodes"
        },
        {
          "field_a_term": "Reaction A + B → C + D",
          "field_b_term": "Directed edge from node (A+B) to node (C+D)",
          "note": "Edge weight = rate constant; reversible reaction = pair of antiparallel edges"
        },
        {
          "field_a_term": "Linkage class",
          "field_b_term": "Connected component of the undirected complex graph",
          "note": "Represents a self-contained reaction subsystem with its own conservation laws"
        },
        {
          "field_a_term": "Deficiency δ = n - l - s",
          "field_b_term": "Topological complexity measure of the reaction network",
          "note": "δ = 0 guarantees unique stable equilibrium (thermodynamic); δ ≥ 1 allows multistability or oscillation"
        },
        {
          "field_a_term": "Weakly reversible network",
          "field_b_term": "Strongly connected directed graph (every node on a cycle)",
          "note": "Weakly reversible + δ = 0 = complex-balanced equilibrium exists"
        },
        {
          "field_a_term": "Stoichiometric subspace",
          "field_b_term": "Image of the incidence matrix of the directed complex graph",
          "note": "Constrains which steady states are reachable from given initial conditions"
        },
        {
          "field_a_term": "Oscillatory CRN (e.g. Brusselator, Oregonator)",
          "field_b_term": "CRN graph with a Hopf bifurcation structure: non-weakly-reversible, δ ≥ 1",
          "note": "Stability of steady states determined by eigenvalues of Jacobian; Hopf criterion is algebraic in rate constants"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF00251225",
          "note": "Horn & Jackson (1972) Arch Rational Mech Anal 47:81 — deficiency zero theorem; complex-balanced equilibria"
        },
        {
          "doi": "10.1007/BF00251226",
          "note": "Feinberg (1972) Arch Rational Mech Anal 49:187 — chemical reaction network theory; deficiency and dynamics"
        },
        {
          "note": "Érdi & Tóth (1989) Mathematical Models of Chemical Reactions (Manchester UP) — comprehensive treatment",
          "url": "https://www.mup.ac.uk/9780719022081"
        },
        {
          "doi": "10.1073/pnas.86.1.67",
          "note": "Feinberg (1989) Proc Natl Acad Sci 86:67 — necessary and sufficient conditions for complex balance"
        },
        {
          "doi": "10.1137/0152013",
          "note": "Feinberg (1991) SIAM J Appl Math — multiple steady states and deficiency one networks"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/chemistry-mathematics/b-reaction-network-graph-theory.yaml"
    },
    {
      "id": "b-thermodynamics-convex-analysis",
      "title": "Classical thermodynamics is a special case of convex duality: the Legendre transform relating U(S,V,N) to Helmholtz and Gibbs free energies is identical to the Legendre-Fenchel transform in convex analysis, and thermodynamic stability conditions are equivalent to convexity constraints on the fundamental relation.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The fundamental thermodynamic relation dU = TdS - PdV + μdN expresses internal energy U as a function of extensive variables (S, V, N). The thermodynamic potentials are Legendre transforms: Helmholtz free energy A(T,V,N) = U - TS (transform in S, replacing S with T = ∂U/∂S); Gibbs free energy G(T,P,",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-thermodynamics-non-convex-regions-phase-coexistence"
      ],
      "communication_gap": "Thermodynamics is taught to chemists, chemical engineers, and physicists using 19th-century notation (exact differentials, Maxwell relations) that obscures the underlying convex geometry. Convex analysis is a pure mathematics and operations research topic; the isomorphism with thermodynamics is known to mathematical physicists (e.g. Wightman 1979) but not taught in chemistry or engineering curricula. Jaynes's Bayesian re-foundation of statistical mechanics is known to information theorists but not routinely to physical chemists.\n",
      "translation_table": [
        {
          "field_a_term": "Legendre-Fenchel transform (convex analysis)",
          "field_b_term": "thermodynamic potential (Helmholtz, Gibbs, enthalpy)",
          "note": "Each thermodynamic potential is the Legendre transform of U with respect to one or more extensive variables"
        },
        {
          "field_a_term": "convexity of f (f'' ≥ 0)",
          "field_b_term": "thermodynamic stability (positive heat capacity, positive compressibility)",
          "note": "Phase transitions occur at boundaries of the convex region; Maxwell equal-area construction is the convex envelope"
        },
        {
          "field_a_term": "conjugate variable pair (x, y = ∂f/∂x)",
          "field_b_term": "conjugate thermodynamic pair (S,T), (V,P), (N,μ)",
          "note": "Each Legendre transform swaps one extensive variable for its conjugate intensive variable"
        },
        {
          "field_a_term": "constrained optimisation with Lagrange multiplier",
          "field_b_term": "maximum entropy principle (Jaynes) — entropy maximised at fixed ⟨E⟩",
          "note": "Lagrange multiplier β = ∂S/∂E = 1/k_BT is inverse temperature"
        },
        {
          "field_a_term": "Euler's theorem for homogeneous functions",
          "field_b_term": "Gibbs-Duhem relation SdT - VdP + Ndμ = 0",
          "note": "Thermodynamic potentials are first-order homogeneous in extensive variables; Gibbs-Duhem follows from Euler's theorem"
        }
      ],
      "references": [
        {
          "note": "Callen, H.B. (1985) Thermodynamics and an Introduction to Thermostatistics, 2nd ed. Wiley."
        },
        {
          "note": "Rockafellar, R.T. (1970) Convex Analysis. Princeton University Press."
        },
        {
          "doi": "10.1103/PhysRev.106.620",
          "note": "Jaynes, E.T. (1957) Phys Rev 106:620 — information theory and statistical mechanics; maximum entropy principle"
        },
        {
          "note": "Wightman, A.S. (1979) Convexity and the Notion of Equilibrium State in Thermodynamics and Statistical Mechanics. In Israel (1979) Convexity in the Theory of Lattice Gases."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/chemistry-mathematics/b-thermodynamics-convex-analysis.yaml"
    },
    {
      "id": "b-topological-data-analysis-x-catalyst-state-space-screening",
      "title": "Topological data analysis provides cross-domain structure discovery for catalyst state-space screening.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Topological data analysis provides cross-domain structure discovery for catalyst state-space screening.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-persistence-based-features-improve-active-catalyst-hit-rate-in-high-throughput-screening"
      ],
      "communication_gap": "The two communities use different notation, benchmarks, and publication venues, which obscures transferable mathematical structure.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1007/s10208-008-9025-5",
          "note": "Persistent homology stability theorem."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/chemistry-mathematics/b-topological-data-analysis-x-catalyst-state-space-screening.yaml"
    },
    {
      "id": "b-turing-completeness-chemical-reaction-networks",
      "title": "Chemical reaction networks (CRNs) are Turing-complete: any computable function can be implemented by a finite set of molecular species and mass-action reactions, bridging theoretical computer science and chemistry.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Soloveichik et al. (2008) proved that stochastic CRNs are Turing-complete: given arbitrary initial molecule counts, a finite CRN can simulate any register machine and hence compute any computable function. Deterministic CRNs (ODEs) can implement continuous analog computation. This means the chemistr",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-dna-strand-displacement-universal-computation"
      ],
      "communication_gap": "Chemists and biochemists working on reaction networks rarely engage with theoretical computer science, and computer scientists working on molecular computation rarely read physical chemistry journals; the DNA computing community has bridged this gap experimentally but the broader theoretical connection remains underexploited.\n",
      "translation_table": [
        {
          "field_a_term": "molecular species population (chemistry)",
          "field_b_term": "register value in register machine (computer science)",
          "note": "Copy-number of a species encodes an integer; reactions implement increment/decrement"
        },
        {
          "field_a_term": "bimolecular reaction A + B → C (chemistry)",
          "field_b_term": "conditional decrement / test-and-branch instruction (computer science)",
          "note": "Catalytic and annihilation reactions implement the primitives of a register machine"
        },
        {
          "field_a_term": "chemical equilibrium / steady state (chemistry)",
          "field_b_term": "halting state of a computation (computer science)",
          "note": "The computation halts when the system reaches a designated species-count indicator"
        },
        {
          "field_a_term": "DNA strand displacement cascade (chemistry)",
          "field_b_term": "logic gate / Boolean circuit (computer science)",
          "note": "DNA toeholds implement OR, AND, NOT gates; fan-out is achieved by signal amplification"
        }
      ],
      "references": [
        {
          "doi": "10.1145/1374376.1374480",
          "note": "Soloveichik et al. (2008) — Turing universality of stochastic CRNs"
        },
        {
          "doi": "10.1126/science.1200962",
          "note": "Qian & Winfree (2011) — molecular logic gates via DNA strand displacement"
        },
        {
          "doi": "10.1145/2422375.2422400",
          "note": "Chen et al. (2014) — deterministic function computation with CRNs"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chemistry-mathematics/b-turing-completeness-chemical-reaction-networks.yaml"
    },
    {
      "id": "b-fluorescence-lifetime-x-mri-t2-star-dephasing",
      "title": "Fluorescence lifetime imaging resolves exponential decay times τ of excited-state populations — MRI T2* relaxation reflects irreversible and reversible dephasing (including local field inhomogeneity broadening) altering transverse magnetization decay times — both disciplines estimate characteristic decay constants from noisy exponential fitting though microscopic mechanisms (radiative vs spin physics) differ entirely.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "FLIM treats intensity decay I(t) ∝ exp(−t/τ_f) across pixels for quantitative molecular microenvironment sensing — T2* maps encode tissue-dependent transverse relaxation rates 1/T2* derived from GRE signal decay envelopes — inverse problems share exponential fitting, partial-volume bias, multi-expon",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-shared-biexponential-fitting-bias-function-across-modalities-same-snr"
      ],
      "communication_gap": "Fluorescence microscopy core facilities rarely train clinicians in magnetization physics; radiology physics labs seldom cite Lakowicz — exponential fitting conferences could unify statistical methodology.\n",
      "translation_table": [
        {
          "field_a_term": "Fluorescence lifetime τ_f distribution (heterogeneous mixture)",
          "field_b_term": "T2* distribution map from gradient-echo decay envelopes",
          "note": "Multi-exponential ambiguity appears in both modalities."
        },
        {
          "field_a_term": "FRET quenching shortening τ_f (proximity sensing)",
          "field_b_term": "Susceptibility-induced local ΔB shortening T2* (iron deposition etc.)",
          "note": "Environmental perturbations shorten apparent decay constants analogously at phenomenological level."
        },
        {
          "field_a_term": "Instrument response function / IRF deconvolution in TCSPC",
          "field_b_term": "RF pulse profile + gradient spoiling shaping observed GRE decay",
          "note": "Hardware kernels must be unfolded before biology inference."
        }
      ],
      "references": [
        {
          "doi": "10.1016/S0076-6879(06)36009-9",
          "note": "Lakowicz (2006) — principles of fluorescence lifetime imaging methods (Methods Enzymol.)"
        },
        {
          "doi": "10.1002/mrm.1910360409",
          "note": "Haacke et al. context — MR susceptibility / T2* mapping methods for tissue iron quantification (representative MR literature)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chemistry-medicine/b-fluorescence-lifetime-x-mri-t2-star-dephasing.yaml"
    },
    {
      "id": "b-ocean-acidification-carbonate-chemistry",
      "title": "Ocean acidification from anthropogenic CO2 uptake is quantified by carbonate chemistry equilibria: dissolved CO2 drives the reaction CO2 + H2O ⇌ H2CO3 ⇌ HCO3^- + H^+ ⇌ CO3^{2-} + 2H^+, decreasing pH by Δ[H^+] = -K_1*K_2*[CO2]/(K_1*[H^+] + [H^+]^2) and reducing aragonite saturation state Ω_arag = [Ca^2+][CO3^{2-}]/K_sp threatening calcification by reef-building organisms",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The ocean carbonate system is a set of coupled equilibria: CO2(aq) + H2O ⇌ H2CO3 (K_0), H2CO3 ⇌ H^+ + HCO3^- (K_1 = 10^{-6.35}), HCO3^- ⇌ H^+ + CO3^{2-} (K_2 = 10^{-10.33}); rising atmospheric pCO2 drives dissolution, increasing [H2CO3] and [HCO3^-] while decreasing [CO3^{2-}], lowering pH by ~0.1 u",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Marine chemists measure carbonate parameters in seawater using instrument calibrations for CO2, pH, and alkalinity while oceanographers model large-scale ocean acidification impacts on ecosystems; communicating the physical chemistry of carbonate equilibria to ecologists studying calcification impacts requires bridging thermodynamic constants to organism-scale biology.",
      "translation_table": [
        {
          "field_a_term": "ocean pH decrease (oceanography)",
          "field_b_term": "proton equilibrium shift from carbonate acid-base system pK values (chemistry)",
          "note": "ΔpH = -Δlog[H^+]; as CO2 increases, the Bjerrum plot shifts, increasing [H^+] and decreasing [CO3^{2-}]"
        },
        {
          "field_a_term": "aragonite saturation state Ω_arag (oceanography)",
          "field_b_term": "ion activity product divided by solubility product K_sp (chemistry)",
          "note": "Ω_arag < 1 means seawater is undersaturated with respect to aragonite and will dissolve CaCO3 shells"
        },
        {
          "field_a_term": "ocean's buffering capacity (alkalinity) (oceanography)",
          "field_b_term": "total alkalinity TA = [HCO3^-] + 2[CO3^{2-}] + [B(OH)4^-] + [OH^-] - [H^+] (chemistry)",
          "note": "Alkalinity is the charge balance that buffers pH; ocean acidification reduces buffering at fixed TA when CO2 increases"
        },
        {
          "field_a_term": "deep-ocean CaCO3 lysocline (oceanography)",
          "field_b_term": "depth at which Ω = 1, calculated from thermodynamic K_sp pressure dependence (chemistry)",
          "note": "K_sp increases with pressure and decreasing temperature; lysocline is the depth where calcite/aragonite dissolves spontaneously"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1155847",
          "note": "Fabry et al. (2008) Science - impacts of ocean acidification on marine fauna"
        },
        {
          "doi": "10.5194/bg-4-655-2007",
          "note": "Orr et al. (2005) - anthropogenic ocean acidification over 21st century"
        },
        {
          "doi": "10.1016/j.marchem.2007.01.006",
          "note": "Dickson et al. (2007) - Guide to Best Practices for Ocean CO2 Measurements"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chemistry-oceanography/b-ocean-acidification-carbonate-chemistry.yaml"
    },
    {
      "id": "b-catalysis-x-transition-state-theory",
      "title": "Catalysis x Transition state theory — activation energy landscape\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Enzymatic catalysis and heterogeneous surface catalysis both lower activation energy by stabilizing the transition state; the Eyring-Polanyi equation k = (kT/h)exp(-DeltaG_dag/RT) is the universal bridge between molecular structure and reaction rate in both contexts.\n",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Biochemists and heterogeneous catalysis chemists developed separate kinetic frameworks (Michaelis-Menten vs Langmuir-Hinshelwood) despite both being limiting cases of transition state theory, creating parallel vocabularies for the same physics.\n",
      "translation_table": [
        {
          "field_a_term": "Enzyme active site transition state stabilization",
          "field_b_term": "Heterogeneous catalyst surface adsorption energy",
          "note": "Both lower DeltaG_dag by the same energy stabilization mechanism; the Brønsted-Evans-Polanyi relation in surface catalysis parallels Pauling's complementarity principle for enzymes.\n"
        },
        {
          "field_a_term": "Michaelis-Menten k_cat",
          "field_b_term": "Turnover frequency (TOF) per active site",
          "note": "Both measure the per-site rate at saturation; k_cat and TOF are defined identically via the Eyring equation once DeltaG_dag is known.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1039/tf9353100875",
          "note": "Eyring (1935) — activated complex theory; the foundational Eyring-Polanyi equation"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chemistry-physics/b-catalysis-x-transition-state-theory.yaml"
    },
    {
      "id": "b-colloidal-systems-soft-matter",
      "title": "Colloidal dispersions are a model system where DLVO electrostatic-van der Waals competition controls stability, hard-sphere entropy drives a purely athermal fluid-crystal phase transition at phi = 0.494, and colloidal glasses at phi = 0.64 are experimental realisations of the glass transition, making colloidal physics the bridge between chemistry and condensed-matter statistical mechanics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Colloidal systems (particle diameter 1 nm – 1 μm) are large enough to be imaged by optical microscopy and small enough to undergo Brownian motion, making them ideal model systems for testing statistical-mechanical theories that are otherwise inaccessible at the atomic scale.\nDLVO theory (Derjaguin, ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-dlvo-failure-short-range-attractions-gels"
      ],
      "communication_gap": "Colloidal chemistry developed as an applied field (pigments, foods, pharmaceuticals, coatings) largely independently of the condensed-matter statistical mechanics that provides its theoretical foundation. The entropy-driven crystallisation result (Pusey & van Megen 1986) appeared in Nature but took years to percolate into chemistry textbooks. The glass transition work by van Blaaderen and Wiltzius (1995) using confocal microscopy appeared in Nature but is not yet standard in physical chemistry curricula. The DLVO framework is well-known in colloid chemistry but its connection to Debye-Hückel theory (electrolyte physics) is rarely made explicit in either field's pedagogy.\n",
      "translation_table": [
        {
          "field_a_term": "Debye screening length κ⁻¹ (controlled by salt concentration)",
          "field_b_term": "effective range of inter-particle repulsion (controls phase behaviour)",
          "note": "chemistry variable (ionic strength) directly maps to physics control parameter of phase diagram"
        },
        {
          "field_a_term": "Hamaker constant A (van der Waals attraction strength)",
          "field_b_term": "depth of interaction potential well U_min",
          "note": "A is a material chemistry property; U_min / kT determines colloidal stability ratio W"
        },
        {
          "field_a_term": "volume fraction φ (particle packing)",
          "field_b_term": "order parameter of colloidal fluid-crystal transition",
          "note": "φ = 0.494 (freezing) and φ = 0.545 (melting) are universal for hard spheres"
        },
        {
          "field_a_term": "colloidal crystal (FCC opal)",
          "field_b_term": "photonic crystal with bandgap in visible spectrum",
          "note": "lattice spacing d ~ 200-400 nm comparable to visible wavelengths"
        },
        {
          "field_a_term": "random close-packing φ ≈ 0.64",
          "field_b_term": "colloidal glass transition (structural arrest)",
          "note": "accessible to direct imaging by confocal microscopy unlike atomic glasses"
        },
        {
          "field_a_term": "PMMA sphere with steric stabilisation (polymer brush)",
          "field_b_term": "hard-sphere model particle (no attractive interactions)",
          "note": "chemically synthesised colloidal model for theoretical hard-sphere system"
        }
      ],
      "references": [
        {
          "note": "Derjaguin & Landau (1941) Acta Physicochim USSR 14:633 — DLVO theory (electrostatic part)"
        },
        {
          "note": "Verwey & Overbeek (1948) Theory of the Stability of Lyophobic Colloids. Elsevier, Amsterdam"
        },
        {
          "doi": "10.1038/320340a0",
          "note": "Pusey & van Megen (1986) Nature 320:340 — hard-sphere fluid-crystal and glass transitions"
        },
        {
          "note": "Russel, Saville & Schowalter (1989) Colloidal Dispersions. Cambridge University Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/chemistry-physics/b-colloidal-systems-soft-matter.yaml"
    },
    {
      "id": "b-electrochemical-impedance-membranes",
      "title": "Electrochemical impedance spectroscopy maps directly onto equivalent-circuit models of biological membranes — the Hodgkin-Huxley ionic conductances are impedance elements, enabling label-free biosensing of living cells with the same formalism used to study corroding metal electrodes.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Electrochemical impedance spectroscopy (EIS) applies a small AC voltage V(omega) = V0 exp(i*omega*t) and measures complex impedance Z(omega) = Z' + iZ''. The Nyquist plot (Z'' vs Z') displays a semicircle whose radius equals the charge-transfer resistance R_ct and whose high-frequency intercept give",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-eis-hodgkin-huxley-parameter-extraction"
      ],
      "communication_gap": "EIS was developed by the electrochemical corrosion community and remains primarily published in electrochemistry journals (J Electrochem Soc, Electrochim Acta). The Hodgkin-Huxley tradition publishes in physiology and neuroscience journals. Both use circuit analogies but rarely cite each other. Biosensor researchers who bridge both communities are a small, specialized group.\n",
      "translation_table": [
        {
          "field_a_term": "charge-transfer resistance R_ct",
          "field_b_term": "membrane ion-channel resistance R_m",
          "note": "Both represent resistive impedance to ion flow across an interface"
        },
        {
          "field_a_term": "double-layer capacitance C_dl",
          "field_b_term": "membrane capacitance C_m (~1 μF/cm²)",
          "note": "Lipid bilayer acts as a parallel-plate capacitor just as the electric double layer does"
        },
        {
          "field_a_term": "Warburg impedance (diffusion-limited, ~sqrt(i*omega) dependence)",
          "field_b_term": "diffusion-limited ion transport through cytoplasm or cell-substrate cleft",
          "note": "Identical frequency dependence in both systems"
        },
        {
          "field_a_term": "Nyquist semicircle radius",
          "field_b_term": "membrane resistance R_m (inversely proportional to channel open probability)",
          "note": "Drug toxicity or channel blockade shifts semicircle radius measurably"
        },
        {
          "field_a_term": "high-frequency intercept R_s",
          "field_b_term": "extracellular solution resistance",
          "note": "Sets baseline; used to normalize cell-substrate impedance measurements"
        }
      ],
      "references": [
        {
          "note": "Macdonald (1987) Impedance Spectroscopy — Wiley; foundational EIS reference"
        },
        {
          "doi": "10.1113/jphysiol.1952.sp004764",
          "note": "Hodgkin & Huxley (1952) J Physiol 117:500 — quantitative model of membrane conductances"
        },
        {
          "doi": "10.1073/pnas.88.17.7896",
          "note": "Giaever & Keese (1991) PNAS 88:7896 — impedance-based cell biosensing"
        },
        {
          "note": "Cole (1972) Membranes, Ions and Impulses — University of California Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/chemistry-physics/b-electrochemical-impedance-membranes.yaml"
    },
    {
      "id": "b-nmr-rotating-frame-x-effective-hamiltonian",
      "title": "Nuclear magnetic resonance in the rotating frame replaces laboratory-frame Zeeman precession with effective Hamiltonians shaped by RF pulses — enabling composite pulse design and average Hamiltonian theory — closely mirroring rotating-wave approximations and engineered Hamiltonians used in quantum computing gate synthesis despite classical RF control electronics dominating bench implementations.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Transforming into a frame rotating at carrier frequency removes fast counter-rotating terms (rotating-wave approximation), yielding tractable effective Hamiltonians used to derive broadband inversion pulses (composite pulses) analogous to robust gate pulses cancelling systematic offsets. Average Ham",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-nmr-rotating-frame-x-effective-hamiltonian"
      ],
      "communication_gap": "Chemistry NMR labs historically taught rotating-frame intuition via Bloch vectors while quantum computing curricula emphasize Hilbert-space effective Hamiltonians — reunification occurs mostly in specialized quantum sensing textbooks bridging both.\n",
      "translation_table": [
        {
          "field_a_term": "Rotating-frame Hamiltonian H_eff (NMR)",
          "field_b_term": "Effective Hamiltonian under gauge transformation U = exp(i ω t S_z) (quantum control)",
          "note": "Same unitary frame-change mathematics underlying both communities’ notation traditions."
        },
        {
          "field_a_term": "Composite pulses canceling B1 inhomogeneity",
          "field_b_term": "Robust control pulses minimizing sensitivity to amplitude errors (quantum gates)",
          "note": "Shared robustness criteria mapped onto systematic error axes rather than literal gate fidelity metrics without calibration."
        },
        {
          "field_a_term": "Average Hamiltonian Magnus expansion",
          "field_b_term": "Magnus-based gate synthesis for digital quantum simulation layers",
          "note": "Identical exponential operator expansions truncated at controlled orders."
        }
      ],
      "references": [
        {
          "doi": "10.1016/S0076-6879(89)76076-2",
          "note": "Levitt (1989) Methods in Enzymology vol 176 — composite pulses / propagators (representative NMR sequence theory chapter)"
        },
        {
          "doi": "10.1017/CBO9780511992635",
          "note": "Levitt — Spin Dynamics (comprehensive rotating-frame and average Hamiltonian theory reference)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chemistry-physics/b-nmr-rotating-frame-x-effective-hamiltonian.yaml"
    },
    {
      "id": "b-nucleation-x-first-passage",
      "title": "Nucleation theory x First passage time - crystal nucleation as rare event\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Crystal nucleation from a supersaturated solution is a rare event governed by first- passage time theory; the classical nucleation theory rate J = Z * A * exp(-delta_G*/kT) (where Z is the Zeldovich factor, A is the attachment rate, and delta_G* is the free energy barrier) is a first-passage rate ov",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Physical chemists developing classical nucleation theory (Volmer & Weber 1926, Becker & Doring 1935) and mathematicians developing first-passage time theory and Kramers' escape rate (Kramers 1940) built parallel frameworks; the equivalence between CNT and Kramers theory was made rigorous by Hanggi et al. (1990), but modern nucleation experiments (microfluidic droplet arrays, cryo-EM of prenucleation clusters) rarely frame results in first-passage time language.\n",
      "translation_table": [
        {
          "field_a_term": "nucleation rate J = Z * A * exp(-delta_G*/kT) (classical nucleation theory)",
          "field_b_term": "inverse mean first-passage time over free energy barrier (Kramers theory)",
          "note": "J is the MFPT inverse for the process crossing the critical nucleus size; delta_G* is the barrier height in Kramers' formula"
        },
        {
          "field_a_term": "Zeldovich factor Z (classical nucleation theory)",
          "field_b_term": "curvature of free energy barrier at saddle point (Kramers theory)",
          "note": "Z = sqrt(|d^2 G/dn^2| / (2 pi kT)) at n* — the barrier curvature that appears in Kramers' rate formula"
        },
        {
          "field_a_term": "critical nucleus size n* (nucleation theory)",
          "field_b_term": "saddle point of free energy landscape (stochastic dynamics)",
          "note": "n* is the saddle point where the barrier tops; nuclei below n* are subcritical (dissolve), above n* grow (committed nucleus)"
        },
        {
          "field_a_term": "induction time tau_ind before first nucleation event (experiments)",
          "field_b_term": "mean first-passage time (MFPT) for rare barrier-crossing event (mathematics)",
          "note": "Experimental induction time = MFPT; its exponential distribution confirms Poisson nucleation process with rate J"
        }
      ],
      "references": [
        {
          "doi": "10.1063/1.1744102",
          "note": "Turnbull & Fisher (1949) - Rate of nucleation in condensed systems; J Chem Phys 17:71 — CNT applied to crystallization"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chemistry-physics/b-nucleation-x-first-passage.yaml"
    },
    {
      "id": "b-percolation-threshold-x-polymer-gelation",
      "title": "Random bond percolation maps gelation of branched polymers near the sol–gel transition — connectivity emerges above a critical fraction p_c of bonded sites/links — mirroring Flory–Stockmayer gel theory where number-average divergences signal infinite molecular weight clusters at the same topological connectivity threshold language used in polymer chemistry pedagogy.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Percolation theory quantifies emergence of a spanning cluster on lattices or random graphs as bond probability crosses p_c. Gelation treats pairwise bonds between monomer units; near the transition the gel fraction rises sharply while viscosity diverges — physicists often describe this as percolatio",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-percolation-threshold-x-polymer-gelation"
      ],
      "communication_gap": "Polymer textbooks emphasize functional-group chemistry and classical gel equations while statistical mechanics curricula teach percolation on abstract graphs with less molecular chemistry vocabulary.\n",
      "translation_table": [
        {
          "field_a_term": "Percolation probability p vs occupied bonds",
          "field_b_term": "Extent of reaction / fraction of functional groups reacted in polymerization",
          "note": "Gel point corresponds to connectivity threshold analogous to p → p_c from below."
        },
        {
          "field_a_term": "Spanning cluster mass scaling near p_c",
          "field_b_term": "Gel fraction and weight-average molecular weight divergence",
          "note": "Divergent connectivity drives rheological gelation signatures."
        },
        {
          "field_a_term": "Correlation length ξ ~ |p − p_c|^{-ν}",
          "field_b_term": "Mesh correlation length / blob size growth approaching gel point",
          "note": "Analogous diverging length scales govern diverging viscosity near gelation."
        },
        {
          "field_a_term": "Bethe lattice percolation exponents",
          "field_b_term": "Classical Flory–Stockmayer tree statistics (loop-free approximation)",
          "note": "Exact agreement on trees; deviations quantify loop effects in real polymers."
        }
      ],
      "references": [
        {
          "doi": "10.1103/RevModPhys.41.574",
          "note": "Essam (1972) Rev. Mod. Phys. — percolation theory overview"
        },
        {
          "doi": "10.1021/ma60071a016",
          "note": "Flory (1941/1942 tradition); secondary polymer gel theory citation cluster — Stockmayer gel branching statistics (historical)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chemistry-physics/b-percolation-threshold-x-polymer-gelation.yaml"
    },
    {
      "id": "b-photocatalysis-x-semiconductor-physics",
      "title": "Photocatalysis x Semiconductor Physics - band gap engineering for solar chemistry\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Semiconductor photocatalysts (TiO2, BiVO4, g-C3N4) absorb photons to generate electron-hole pairs that drive redox reactions; the band gap determines which wavelengths are absorbed and whether the conduction/valence band edges straddle the redox potentials for target reactions - making photocatalyst",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Semiconductor physics (band theory, p-n junctions, carrier dynamics) has been developed in electrical engineering for 80 years; photocatalysis researchers often independently rediscovered semiconductor physics concepts (band gap engineering, Schottky barriers, carrier diffusion length) without importing the rigorous quantitative framework.\n",
      "translation_table": [
        {
          "field_a_term": "Photocatalyst band gap (eV)",
          "field_b_term": "Photon absorption threshold (E = hnu_min)",
          "note": "Photons with energy greater than the band gap excite electrons from valence to conduction band; the gap sets the solar spectrum fraction absorbed - TiO2 (3.2 eV) absorbs only UV (5% of solar), while g-C3N4 (2.7 eV) absorbs visible light (45% of solar).\n"
        },
        {
          "field_a_term": "Conduction band minimum (CBM, reduction potential)",
          "field_b_term": "Fermi level of excited electrons (electrochemical potential)",
          "note": "The CBM position (vs NHE) determines whether photogenerated electrons can reduce target molecules (H2 evolution: requires CBM > -0.41 V vs NHE); valence band maximum must be positive of the oxidation potential.\n"
        },
        {
          "field_a_term": "Electron-hole recombination (dark reaction)",
          "field_b_term": "Carrier lifetime (minority carrier recombination)",
          "note": "Electron-hole pairs recombine radiatively or non-radiatively on nanosecond-microsecond timescales; maximizing carrier lifetime (via heterojunction, Z-scheme, cocatalysts) is the same engineering challenge as minimizing recombination in solar cells.\n"
        },
        {
          "field_a_term": "Z-scheme photocatalyst (two-step light absorption)",
          "field_b_term": "Tandem semiconductor junction (two-absorber stack)",
          "note": "Z-scheme photocatalysts mimic natural photosynthesis (PS I + PS II) by using two semiconductors with staggered bands, analogous to a tandem solar cell; the electron mediator (IO3-/I-, Fe3+/Fe2+) connects the two half-reactions.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1021/cr0001831",
          "note": "Hoffmann et al. (1995) - environmental applications of semiconductor photocatalysis; Chem Rev 95:69"
        },
        {
          "doi": "10.1039/c4cs00126e",
          "note": "Kudo & Miseki (2009) - heterogeneous photocatalyst materials for water splitting; Chem Soc Rev"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chemistry-physics/b-photocatalysis-x-semiconductor-physics.yaml"
    },
    {
      "id": "b-polymer-glass-x-jamming-transition",
      "title": "Polymer glass transition x Jamming - structural arrest as point J\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The glass transition in polymers and the jamming transition in dense granular media are unified by the jamming phase diagram (Liu and Nagel 1998); both are examples of kinetic arrest where the system becomes mechanically rigid without long-range order, both share diverging relaxation timescales and ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Polymer chemists characterizing T_g and granular physicists studying dense flow developed separate frameworks for structural arrest; the unifying jamming phase diagram (Liu and Nagel 1998) connected these communities but the quantitative mapping between polymer fragility (Angell plot) and granular jamming criticality is still actively debated, with colloidal suspensions serving as the experimental bridge system.\n",
      "translation_table": [
        {
          "field_a_term": "glass transition temperature T_g (polymer chemistry)",
          "field_b_term": "jamming temperature axis in Liu-Nagel phase diagram (physics)",
          "note": "T_g is the polymer manifestation of the temperature-driven jamming transition; both represent kinetic arrest of structural rearrangement"
        },
        {
          "field_a_term": "alpha-relaxation time tau_alpha diverging at T_g (polymer dynamics)",
          "field_b_term": "diverging relaxation time at jamming point J (granular physics)",
          "note": "Both systems show super-Arrhenius divergence of relaxation time; Vogel-Fulcher-Tammann in polymers maps to power-law divergence near J"
        },
        {
          "field_a_term": "dynamic heterogeneity (mobile and immobile regions in glass) (polymer physics)",
          "field_b_term": "force chain heterogeneity in jammed granular media (granular physics)",
          "note": "Both systems show spatially heterogeneous dynamics with growing correlation length on approach to arrest"
        },
        {
          "field_a_term": "free volume theory of glass transition (polymer chemistry)",
          "field_b_term": "random close packing volume fraction phi_J ~ 0.64 (granular physics)",
          "note": "Polymer glass transition at T_g corresponds to free volume approaching zero, analogous to granular jamming at phi_J"
        }
      ],
      "references": [
        {
          "doi": "10.1038/31189",
          "note": "Liu & Nagel (1998) - Jamming is not just cool any more; Nature 396:21 — introduces the unified jamming phase diagram"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chemistry-physics/b-polymer-glass-x-jamming-transition.yaml"
    },
    {
      "id": "b-polymer-physics-scaling-laws",
      "title": "De Gennes' renormalization group mapping of polymer chains (N monomers) to the n→0 field theory gives the exact Flory exponent ν≈0.588 for chain size R∝N^ν; reptation theory gives viscosity η∝N³ and diffusion D∝N⁻²; Edwards' Hamiltonian maps polymer statistics to the Feynman path integral for a free quantum particle — universal scaling independent of chemical identity.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A polymer chain of N monomers with excluded volume: the end-to-end distance R ~ N^ν. Flory theory (1949): minimize F = k_BT[R²/Nb² + b³N²/R³] gives ν = 3/(d+2) = 3/5 in d=3. De Gennes' renormalization group argument (1972) maps the partition function of a self-avoiding walk to the n→0 limit of the O",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-reptation-tube-model-constraint-release"
      ],
      "communication_gap": "Polymer physics is often taught in chemistry departments using empirical rules, while the field-theory derivations live in condensed matter physics. Industrial polymer scientists focus on rheological measurements; the connection between measured viscosity exponents and the reptation model's fundamental assumptions is not always drawn. The quantum mechanics connection (path integral ↔ polymer) is known to theoretical physicists but almost never mentioned in polymer chemistry textbooks.\n",
      "translation_table": [
        {
          "field_a_term": "polymer partition function Z = Σ exp(-βH[R(s)])",
          "field_b_term": "quantum amplitude ⟨x_f|e^{-Ht/ħ}|x_i⟩ in imaginary time",
          "note": "contour length s plays the role of imaginary time in the path integral"
        },
        {
          "field_a_term": "Flory exponent ν (chain size R ~ N^ν)",
          "field_b_term": "critical exponent η in O(n→0) field theory",
          "note": "ν and η are related by scaling relations; RG gives them exactly"
        },
        {
          "field_a_term": "tube diameter a in reptation",
          "field_b_term": "entanglement molecular weight M_e (characteristic scale)",
          "note": "a ~ √(M_e) sets the crossover from Rouse to reptation dynamics"
        },
        {
          "field_a_term": "polyelectrolyte electrostatic blob size ξ_e",
          "field_b_term": "Debye screening length λ_D in Coulomb potential",
          "note": "electrostatic blobs are subsections where electrostatics dominate over entropy"
        },
        {
          "field_a_term": "Zimm dynamics (hydrodynamic interaction included)",
          "field_b_term": "diffusion tensor in Oseen-Burgers formalism",
          "note": "hydrodynamic screening separates dilute (Zimm) from semi-dilute (Rouse) behavior"
        }
      ],
      "references": [
        {
          "doi": "10.1063/1.1747243",
          "note": "Flory (1949) The configuration of real polymer chains. J Chem Phys 17:303–310"
        },
        {
          "note": "de Gennes (1979) Scaling Concepts in Polymer Physics; Cornell University Press, Ithaca NY"
        },
        {
          "note": "Doi & Edwards (1986) The Theory of Polymer Dynamics; Oxford University Press"
        },
        {
          "note": "Rubinstein & Colby (2003) Polymer Physics; Oxford University Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/chemistry-physics/b-polymer-physics-scaling-laws.yaml"
    },
    {
      "id": "b-quantum-chemistry-electronic-structure",
      "title": "The many-body Schrödinger equation for electrons in molecules is computationally intractable, but density functional theory (DFT) — grounded in the Hohenberg-Kohn theorem that ground state energy is an exact functional of electron density — enables practical first-principles computation of molecular structure, reaction energies, and materials properties, bridging quantum physics to all of chemistry.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Schrodinger equation for a molecule is exactly solvable only for H2+. DFT (Hohenberg-Kohn 1964): ground state energy E[rho] is exact functional of electron density rho(r); Kohn-Sham 1965 provides practical self-consistent equations. DFT exchange-correlation functional E_xc[rho] is the key unknow",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-dft-jacob-ladder-convergence-to-accuracy"
      ],
      "communication_gap": "DFT was developed by physicists (Hohenberg, Kohn Nobel Prizes) but is primarily used by chemists. The HK theorem and KS equations are standard physics but their implications for chemistry are often not explicitly bridged in curricula.\n",
      "translation_table": [
        {
          "field_a_term": "many-body wavefunction Psi(r1, r2, ..., rN)",
          "field_b_term": "electron density rho(r) = N integral of |Psi|^2 dr2...drN",
          "note": "HK theorem reduces N-body problem to 3D density functional"
        },
        {
          "field_a_term": "exchange-correlation hole",
          "field_b_term": "XC functional E_xc[rho]",
          "note": "key unknown in all DFT approximations"
        },
        {
          "field_a_term": "Slater determinant (Hartree-Fock)",
          "field_b_term": "Kohn-Sham non-interacting reference system",
          "note": "KS-DFT uses non-interacting electrons with exact-density exchange-correlation potential"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRev.136.B864",
          "note": "Hohenberg & Kohn (1964) Phys Rev 136:B864"
        },
        {
          "doi": "10.1103/PhysRev.140.A1133",
          "note": "Kohn & Sham (1965) Phys Rev 140:A1133"
        },
        {
          "note": "Szabo & Ostlund (1989) Modern Quantum Chemistry; Dover"
        },
        {
          "doi": "10.1063/1.2148954",
          "note": "Becke (2014) J Chem Phys 140:18A301"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/chemistry-physics/b-quantum-chemistry-electronic-structure.yaml"
    },
    {
      "id": "b-soft-matter-liquid-crystal-order",
      "title": "Liquid crystals bridge chemistry and physics: the nematic Frank elastic energy (splay/twist/bend constants KΓéü, KΓéé, KΓéâ), the Freedericksz transition enabling LCD displays, and cholesteric structural color in beetle exoskeletons all emerge from broken orientational symmetry in anisotropic molecules.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Liquid crystals (LCs) are intermediate phases between isotropic liquids and crystalline solids, bridging soft matter chemistry (molecular anisotropy, synthesis) and condensed matter physics (symmetry breaking, order parameters, defects). The nematic phase: long-range orientational order described by",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-cholesteric-lc-structural-color-biomimetic-photonic-applications"
      ],
      "communication_gap": "Chemists who synthesize LC molecules (liquid crystal synthesis, chiral dopants) rarely engage with the condensed matter physics of topological defects and order parameter field theory. Display engineers who optimize LCD pixel performance often do not engage with the soft matter physics of defect dynamics and Freedericksz transition mechanics. Biologists who study structural color in organisms rarely engage with the LC physics that produces it.\n",
      "translation_table": [
        {
          "field_a_term": "nematic director field n╠é (unit vector)",
          "field_b_term": "order parameter field (broken O(2) rotational symmetry)",
          "note": "n╠é and -n╠é are equivalent (head-tail symmetry); the manifold of degenerate ground states is RP┬▓"
        },
        {
          "field_a_term": "Frank elastic constants KΓéü, KΓéé, KΓéâ (pN scale)",
          "field_b_term": "stiffness of orientational distortions (splay, twist, bend)",
          "note": "derived from molecular anisotropy; K_i ~ k_B T / molecular length scale"
        },
        {
          "field_a_term": "Freedericksz transition (critical field E_c)",
          "field_b_term": "second-order phase transition in director orientation",
          "note": "switching voltage for LCD pixels derived from this; KΓéü and ╬ö╬╡ determine pixel response time"
        },
        {
          "field_a_term": "cholesteric pitch p (nm to ╬╝m range)",
          "field_b_term": "wavelength of selective reflection ╬╗ = n┬╖p",
          "note": "chiral dopant concentration sets pitch; p tunable over visible spectrum ΓåÆ structural color"
        },
        {
          "field_a_term": "topological defects (disclination lines)",
          "field_b_term": "topological charges classified by fundamental group ╧ÇΓéü(RP┬▓) = ZΓéé",
          "note": "half-integer defects cannot be removed by continuous deformation; annihilate in pairs"
        }
      ],
      "references": [
        {
          "note": "de Gennes & Prost (1993) The Physics of Liquid Crystals, 2nd ed.; Oxford University Press"
        },
        {
          "note": "Freedericksz & Tsvetkov (1927) Phys Z Sowjetunion 6:490"
        },
        {
          "doi": "10.1039/df9582500019",
          "note": "Frank (1958) On the theory of liquid crystals; Discuss Faraday Soc 25:19"
        },
        {
          "note": "Coullet & Gilli (1983) New instabilities in some dynamical models; Phys Rev Lett 51:1686"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chemistry-physics/b-soft-matter-liquid-crystal-order.yaml"
    },
    {
      "id": "b-toxicology-environmental-policy",
      "title": "Toxicological dose-response relationships (Paracelsus 1538, linear no-threshold model, hormesis) directly determine environmental regulatory policy (NOAEL, EPA risk assessment, REACH), but the discovery that endocrine disruptors exhibit non-monotonic dose-response curves invalidates the LNT model for these compounds and challenges the precautionary principle's scientific basis.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "contested",
      "bridge_claim": "Paracelsus's 1538 dictum \"the dose makes the poison\" established dose-response monotonicity as the foundation of toxicology: threshold models (NOAEL/LOAEL) and the linear no-threshold (LNT) model for carcinogens both assume that harm increases with dose. Regulatory agencies (EPA, EFSA) use these mod",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-lnt-model-invalid-endocrine-disruptors"
      ],
      "communication_gap": "Toxicologists publish in Toxicological Sciences and Environmental Health Perspectives; regulatory policy analysts publish in Risk Analysis and Environmental Law. The scientific controversy about NMDR curves is contested in toxicology but has already been resolved (in favor of NMDR) in EU regulatory practice for endocrine disruptors. Scientists who discover novel toxicological mechanisms rarely engage with regulatory agencies; regulators who set standards rarely follow the primary toxicology literature in real time. The disconnect means that scientific consensus and regulatory policy can diverge for decades.\n",
      "translation_table": [
        {
          "field_a_term": "dose-response curve (toxicology)",
          "field_b_term": "regulatory threshold (NOAEL, ADI, reference dose)",
          "note": "The dose-response model is mechanistically scientific; the threshold selection is a policy decision"
        },
        {
          "field_a_term": "linear no-threshold (LNT) model",
          "field_b_term": "EPA cancer potency factor / unit risk",
          "note": "LNT underpins carcinogen regulation — any exposure confers proportional risk"
        },
        {
          "field_a_term": "non-monotonic dose-response (NMDR)",
          "field_b_term": "endocrine disruptor regulation failure",
          "note": "NMDR curves mean that \"safe\" low doses set by LNT extrapolation may actually be harmful"
        },
        {
          "field_a_term": "hormesis (beneficial low-dose effects)",
          "field_b_term": "contested regulatory paradigm (Calabrese vs. traditional toxicology)",
          "note": "If hormesis is real, current LNT-based standards are overprotective at low doses but miss low-dose harms"
        },
        {
          "field_a_term": "REACH precautionary principle",
          "field_b_term": "reverse burden of proof in chemical regulation",
          "note": "REACH requires manufacturers to demonstrate safety (hazard communication) rather than requiring regulators to prove harm"
        },
        {
          "field_a_term": "uncertainty factor (10× per uncertain step)",
          "field_b_term": "regulatory conservatism embedding scientific uncertainty",
          "note": "Uncertainty factors translate scientific gaps into quantitative regulatory margins"
        }
      ],
      "references": [
        {
          "note": "Vandenberg et al. (2012) — Hormones and endocrine-disrupting chemicals: low-dose effects and non-monotonic dose responses",
          "doi": "10.1210/er.2011-1050"
        },
        {
          "note": "NRC (1983) — Risk Assessment in the Federal Government: Managing the Process",
          "url": "https://nap.nationalacademies.org/catalog/317/risk-assessment-in-the-federal-government-managing-the-process"
        },
        {
          "note": "Grandjean (2016) — Paracelsus revisited: the dose concept in a complex world",
          "doi": "10.1186/s12940-016-0143-6"
        },
        {
          "note": "Paracelsus (1538) — Third Defense (Septem Defensiones); primary source for 'dosis sola facit venenum'",
          "url": "https://doi.org/10.1007/978-3-662-04438-6"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/chemistry-social-science/b-toxicology-environmental-policy.yaml"
    },
    {
      "id": "b-bayesian-oed-x-robotic-chemistry-optimization",
      "title": "Bayesian optimal experimental design (OED) provides a principled acquisition framework for robotic chemistry optimization loops.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Robotic chemistry platforms can rank candidate experiments by expected information gain instead of heuristic exploration. The bridge operationalizes uncertainty-aware design and creates auditable stopping criteria.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-lookahead-oed-reduces-experiments-to-target-yield"
      ],
      "communication_gap": "Adjacent communities use different software stacks and validation norms, so mathematically equivalent tools are often rediscovered in parallel.",
      "translation_table": [
        {
          "field_a_term": "Expected information gain",
          "field_b_term": "Experiment acquisition score",
          "note": "Prioritizes experiments that reduce posterior uncertainty."
        },
        {
          "field_a_term": "Posterior entropy",
          "field_b_term": "Model uncertainty over reaction outcomes",
          "note": "Quantifies remaining ignorance for stop/go decisions."
        },
        {
          "field_a_term": "Lookahead utility",
          "field_b_term": "Multi-step robot planning objective",
          "note": "Balances immediate yield and long-term information."
        }
      ],
      "references": [
        {
          "doi": "10.1098/rsta.1922.0009",
          "note": "Fisher (1922) estimation and information."
        },
        {
          "doi": "10.1017/S0962492910000061",
          "note": "Stuart (2010) Bayesian inverse-problem foundations."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/chemistry-statistics/b-bayesian-oed-x-robotic-chemistry-optimization.yaml"
    },
    {
      "id": "b-circadian-entrainment-phase-response-curve",
      "title": "Circadian clock entrainment to light-dark cycles is quantitatively described by the phase response curve (PRC): a one-dimensional map from zeitgeber phase to phase shift that, combined with limit cycle oscillator theory, predicts entrainment range, phase angle, and resynchronisation kinetics after transmeridian travel.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A circadian clock is a biochemical limit cycle oscillator with period T_free. When exposed to a periodic zeitgeber (light, temperature) with period T_ext, entrainment occurs if the clock can phase-shift to compensate for the mismatch T_ext - T_free each cycle. The PRC phi(theta) gives the phase shif",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Chronobiologists measure PRCs empirically but rarely connect them to the full mathematical theory of limit cycle entrainment; mathematicians studying phase-coupled oscillators rarely fit their models to real circadian data. Clinical applications (jet lag, shift work, chronotherapy) require quantitative PRC-based models that are rarely used in practice.\n",
      "translation_table": [
        {
          "field_a_term": "phase response curve (PRC) phi(theta) (mathematics)",
          "field_b_term": "measured phase shift vs. light pulse phase in circadian experiments (chronobiology)",
          "note": "PRC measured by giving single light pulses at different circadian phases; fundamental data for clock theory"
        },
        {
          "field_a_term": "Arnold tongue (entrainment range) (mathematics)",
          "field_b_term": "range of light-dark cycle periods that entrain the circadian clock (chronobiology)",
          "note": "Clock entrains to T_ext in [T_free - Delta, T_free + Delta] where Delta = amplitude of PRC"
        },
        {
          "field_a_term": "limit cycle oscillator period T_free (mathematics)",
          "field_b_term": "free-running circadian period (chronobiology)",
          "note": "T_free ~ 24 h in most organisms; deviations from 24 h require larger daily phase shifts to entrain"
        },
        {
          "field_a_term": "phase-locking / stable phase angle theta* (mathematics)",
          "field_b_term": "phase of sleep relative to dusk / dawn (chronobiology)",
          "note": "Entrainment sets theta* where daily PRC phase shift exactly compensates T_ext - T_free"
        }
      ],
      "references": [
        {
          "doi": "10.1007/978-3-662-22492-9",
          "note": "Winfree (1980) - The Geometry of Biological Time; PRC and limit cycle theory"
        },
        {
          "doi": "10.1152/jappl.1988.64.2.557",
          "note": "Jewett & Kronauer (1998) - refinement of a limit cycle oscillator model of the effects of light on the human circadian pacemaker"
        },
        {
          "doi": "10.1177/0748730410381599",
          "note": "Lewy et al. (2010) - the circadian basis of winter depression; melatonin PRC application"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/chronobiology-mathematics/b-circadian-entrainment-phase-response-curve.yaml"
    },
    {
      "id": "b-climate-tipping-health",
      "title": "Bifurcation mathematics describing climate tipping points (AMOC collapse, permafrost carbon feedback, ice-sheet runaway) predicts epidemiological phase transitions under climate stress — the same fold-bifurcation and saddle-node dynamics govern both planetary-scale regime shifts and population health threshold crossings.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Climate science has developed rigorous mathematical frameworks for tipping points: saddle-node bifurcations where a slowly-changing forcing (CO2 concentration, temperature anomaly) drives a system to a threshold beyond which it jumps discontinuously to a new attractor. The AMOC slowdown, Arctic sea-",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-permafrost-carbon-tipping-2point5",
        "h-amoc-saddle-node-bifurcation"
      ],
      "communication_gap": "Climate science and epidemiology are institutionally separated: different funding agencies (NSF/NOAA vs NIH/CDC), different journals, and different mathematical traditions. Climate dynamical systems use bifurcation theory and potential wells; epidemiology uses compartmental ODE models (SIR/SEIR). Both are nonlinear dynamical systems, but practitioners are rarely trained across both literatures. Integrated climate-health modeling remains a niche subdiscipline; the explicit connection to bifurcation theory and tipping cascades is not standard in either community's textbooks.\n",
      "translation_table": [
        {
          "field_a_term": "saddle-node (fold) bifurcation",
          "field_b_term": "R0=1 epidemic threshold / poverty-trap crossing / heat-mortality inflection point"
        },
        {
          "field_a_term": "slow-variable forcing (CO2 concentration)",
          "field_b_term": "slow climate change forcing driving disease range expansion or food insecurity"
        },
        {
          "field_a_term": "alternative stable states (ice-covered vs ice-free Arctic)",
          "field_b_term": "endemic vs disease-free equilibrium; acute vs chronic malnutrition basin"
        },
        {
          "field_a_term": "early-warning indicators (rising AR1, variance)",
          "field_b_term": "pre-epidemic rising case variance, autocorrelation in incidence time series"
        },
        {
          "field_a_term": "hysteresis (return requires forcing well below bifurcation point)",
          "field_b_term": "disease elimination requires R0 far below 1; stunting reversal lags intervention"
        },
        {
          "field_a_term": "tipping cascade (AMOC → monsoon → Amazon dieback)",
          "field_b_term": "climate-health cascade: heat → harvest failure → malnutrition → disease susceptibility"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1153842",
          "note": "Lenton et al. (2008) — tipping elements in Earth's climate system; fold bifurcation framework"
        },
        {
          "doi": "10.1126/science.1258649",
          "note": "Scheffer et al. (2015) — generic early-warning signals for critical transitions"
        },
        {
          "doi": "10.1038/s41558-018-0156-3",
          "note": "Watts et al. (2018) — Lancet Countdown on health and climate change"
        },
        {
          "doi": "10.1098/rsif.2012.0758",
          "note": "Dakos et al. (2012) — slowing down as early warning of transitions in ecology and earth system"
        },
        {
          "doi": "10.1038/s41467-019-09735-6",
          "note": "Ryan et al. (2019) — global expansion of dengue fever driven by climate change"
        },
        {
          "doi": "10.1126/science.aad9839",
          "note": "Sherwood & Huber (2010) — wet-bulb temperature limit for human survivability"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/climate-medicine/b-climate-tipping-health.yaml"
    },
    {
      "id": "b-coral-bleaching-thermal-stress",
      "title": "Coral bleaching is triggered when the degree-heating-week (DHW) threshold exceeds 8°C-weeks: this nonlinear thermal accumulation metric predicts bleaching probability with AUC~0.85 across reef systems",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Coral bleaching (expulsion of symbiotic zooxanthellae from coral tissue) occurs when thermal stress accumulates beyond a critical threshold. NOAA's Coral Reef Watch defines the Degree Heating Week (DHW) metric: DHW(t) = Σ_{τ=t-12wk}^{t} max(SST_τ - MMM - 1°C, 0) / 7, where MMM is the Maximum Monthly",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-coral-bleaching-thermal-stress"
      ],
      "communication_gap": "Climate scientists who model ocean temperatures and marine biologists who study coral physiology use the DHW metric without always connecting it to the threshold-dose-response models from toxicology and pharmacology. The ecological threshold literature (tipping points, alternative stable states) provides relevant mathematical frameworks for predicting coral collapse under climate change that are underutilized in reef ecology.\n",
      "translation_table": [
        {
          "field_a_term": "degree-heating-week (DHW) thermal stress index",
          "field_b_term": "cumulative integral of temperature excess above bleaching threshold",
          "note": "DHW has units °C·weeks; functionally analogous to dose in dose-response pharmacology"
        },
        {
          "field_a_term": "bleaching threshold temperature MMM + 1°C",
          "field_b_term": "critical temperature above which ROS production exceeds repair capacity",
          "note": "The +1°C above MMM rather than MMM itself reflects thermal acclimation history"
        },
        {
          "field_a_term": "bleaching probability curve as function of DHW",
          "field_b_term": "sigmoidal dose-response function with EC50 ~ 8°C-weeks",
          "note": "Logistic regression of bleaching incidence on DHW gives slope and threshold parameters"
        },
        {
          "field_a_term": "thermal tolerance variation across Symbiodiniaceae clades",
          "field_b_term": "host-symbiont combination determines the effective EC50 of the dose-response",
          "note": "Clade D symbionts increase bleaching threshold by ~1-2°C — raises DHW threshold"
        }
      ],
      "references": [
        {
          "doi": "10.1071/MF99078",
          "note": "Hoegh-Guldberg et al. (1999) Climate change, coral bleaching and the future of the world's coral reefs. Mar Fresh Res 50:839"
        },
        {
          "doi": "10.1111/j.1365-2486.2005.01073.x",
          "note": "Donner et al. (2005) Global assessment of coral bleaching and required rates of adaptation. Global Change Biol 11:2251"
        },
        {
          "doi": "10.1126/science.aan8048",
          "note": "Hughes et al. (2018) Spatial and temporal patterns of mass bleaching of corals in the Anthropocene. Science 359:80"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/climate-science-biology/b-coral-bleaching-thermal-stress.yaml"
    },
    {
      "id": "b-phenological-mismatch-synchrony",
      "title": "Climate-driven phenological mismatch in ecological systems is mathematically equivalent to phase desynchronisation between coupled oscillators: the Kuramoto model of coupled biological clocks predicts the critical climate-sensitivity differential at which trophic synchrony breaks down, and observed mismatch data follow the predicted phase-lag scaling.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Phenological synchrony — the match between an organism's life-history events (migration, egg-laying, flowering, caterpillar emergence) and the seasonal peak of its food resource — is a prerequisite for fitness in many species. Climate change shifts these peaks differentially: species with different ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-phenological-synchrony-kuramoto-phase-transition"
      ],
      "communication_gap": "Ecologists studying phenological mismatch and dynamical systems physicists studying coupled oscillators work in separate literature streams. The cross-correlation and coherence methods used in phenology are standard signal-processing tools but are not connected to Kuramoto theory in ecology papers. The Kuramoto model is applied extensively in neuroscience and engineering but ecologists are largely unaware of it. Visser & Both (2005) framed mismatch in terms of linear regression on climate sensitivity without the phase-oscillator formalism that would make the predictions quantitatively rigorous.\n",
      "translation_table": [
        {
          "field_a_term": "Phase φ_i of oscillator i",
          "field_b_term": "Day-of-year of phenological event for species i (DOY_i)",
          "note": "The calendar date of egg-laying, flowering, or emergence is the biological 'phase'"
        },
        {
          "field_a_term": "Natural frequency ω_i",
          "field_b_term": "Climate sensitivity of species i (dDOY/dT_spring)",
          "note": "How many days earlier per degree of spring warming the species advances its timing"
        },
        {
          "field_a_term": "Coupling constant K between oscillator pair (i,j)",
          "field_b_term": "Trophic dependence strength between species i and j",
          "note": "Strong predator-prey dependence = strong phase-locking; weak mutualism = weak coupling"
        },
        {
          "field_a_term": "Frequency mismatch Δω = |ω_i - ω_j|",
          "field_b_term": "Differential climate sensitivity between trophically linked species",
          "note": "Large Δω (species respond very differently to warming) → mismatch, even with coupling"
        },
        {
          "field_a_term": "Kuramoto synchrony order parameter r = |⟨e^{iφ}⟩|",
          "field_b_term": "Population-level phenological overlap between consumer and resource",
          "note": "r = 1 means perfect synchrony; r → 0 means complete mismatch across the population"
        },
        {
          "field_a_term": "Phase transition at critical coupling K_c",
          "field_b_term": "Critical warming threshold for synchrony collapse",
          "note": "When climate sensitivity differential exceeds trophic coupling, synchrony breaks down"
        },
        {
          "field_a_term": "Cross-correlation at lag τ",
          "field_b_term": "Phenological mismatch measured in days",
          "note": "The lag τ* that maximises C(τ) is the observed mismatch between timing series"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rspb.2005.3023",
          "note": "Visser & Both (2005) Proc R Soc B 272:2561 — shifts in phenology; time-as-a-resource mismatch framework"
        },
        {
          "doi": "10.1038/nature04645",
          "note": "Both et al. (2006) Nature 441:81 — climate change and population declines in long-distance migratory birds"
        },
        {
          "doi": "10.1146/annurev-ecolsys-110617-062535",
          "note": "Renner & Zohner (2018) Annu Rev Ecol Evol Syst — climate change and phenological mismatch in trophic interactions"
        },
        {
          "doi": "10.1007/978-3-642-69689-3",
          "note": "Kuramoto (1984) Chemical Oscillations, Waves and Turbulence — original Kuramoto phase model"
        },
        {
          "doi": "10.1103/RevModPhys.77.137",
          "note": "Acebrón et al. (2005) Rev Mod Phys 77:137 — comprehensive review of the Kuramoto model"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/climate-science-biology/b-phenological-mismatch-synchrony.yaml"
    },
    {
      "id": "b-carbon-pricing-pigouvian",
      "title": "The social cost of carbon (SCC) is a Pigouvian tax problem — internalising the negative externality of greenhouse gas emissions into market prices — solved within the Ramsey optimal-growth framework extended to climate damage functions, yielding the Stern-Nordhaus integrated assessment model (IAM) as a coupled macroeconomic–climate ODE system.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Pigou (1920) showed that a competitive market overproduces goods with negative externalities; the welfare-maximising corrective is a tax equal to the marginal social damage at the optimum (the Pigouvian tax τ* = MSD). For CO₂, τ* is the social cost of carbon (SCC): the present value of all damages c",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ramsey-optimal-carbon-price-tipping-points"
      ],
      "communication_gap": "Climate scientists and economists work in largely separate communities and publish in separate journals. Climate models are validated on physical observables (temperature, sea level, ice extent); economic models are validated on GDP growth and consumption data. The IAM community (Nordhaus, Stern, Weitzman, Dietz) sits at the intersection but is relatively small. Most climate scientists are unfamiliar with Ramsey growth theory; most economists are unfamiliar with radiative forcing calculations. Policy bodies (IPCC Working Group III) bring them together, but the underlying mathematical bridge is rarely made explicit in either field's pedagogy.\n",
      "translation_table": [
        {
          "field_a_term": "Pigouvian tax τ* = marginal social damage",
          "field_b_term": "social cost of carbon (SCC, $/tCO₂)"
        },
        {
          "field_a_term": "Ramsey discount rate r = ρ + η·g",
          "field_b_term": "social discount rate applied to future climate damages"
        },
        {
          "field_a_term": "pure rate of time preference ρ (Ramsey)",
          "field_b_term": "Stern-Nordhaus ethical disagreement parameter"
        },
        {
          "field_a_term": "Hamiltonian co-state variable (optimal control)",
          "field_b_term": "shadow price of atmospheric CO₂ = SCC"
        },
        {
          "field_a_term": "state variable K(t) (Ramsey capital stock)",
          "field_b_term": "DICE capital stock K(t) coupled to climate damage D(T)"
        },
        {
          "field_a_term": "transient climate response TCR (climate science)",
          "field_b_term": "temperature sensitivity parameter in IAM damage function"
        },
        {
          "field_a_term": "fat-tailed uncertainty (Weitzman, extreme value theory)",
          "field_b_term": "catastrophic climate damage scenarios (tipping points, ECS tail)"
        }
      ],
      "references": [
        {
          "url": "https://www.cambridge.org/gb/universitypress/subjects/economics/public-economics-and-public-policy/economics-welfare",
          "note": "Pigou (1920) The Economics of Welfare — Macmillan, externality and Pigouvian tax framework"
        },
        {
          "doi": "10.2307/2224098",
          "note": "Ramsey (1928) A mathematical theory of saving, Econ J 38:543 — optimal growth framework"
        },
        {
          "doi": "10.1257/jel.45.3.686",
          "note": "Nordhaus (2007) A review of the Stern Review on the Economics of Climate Change, J Econ Lit 45:686"
        },
        {
          "url": "https://webarchive.nationalarchives.gov.uk/ukgwa/20100407172811/http://www.hm-treasury.gov.uk/stern_review_report.htm",
          "note": "Stern (2006) The Stern Review on the Economics of Climate Change — HM Treasury"
        },
        {
          "doi": "10.1162/rest.91.1.1",
          "note": "Weitzman (2009) On modeling and interpreting the economics of catastrophic climate change, Rev Econ Stat 91:1"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/climate-science-economics/b-carbon-pricing-pigouvian.yaml"
    },
    {
      "id": "b-integrated-assessment-social-cost-carbon",
      "title": "Integrated Assessment Models (DICE, PAGE, FUND) couple atmospheric carbon cycle physics to economic damage functions; the social cost of carbon — the present value of marginal damage from one tonne CO₂ — is the bridge where atmospheric physics and welfare economics meet, with the discount rate as the critical contested parameter.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Integrated Assessment Models (IAMs) are the formal bridge between physical climate science and economic policy. They translate atmospheric CO₂ concentrations into temperature changes (physics) and then into economic damages (economics), yielding the social cost of carbon (SCC).\n1. The IAM structure.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-scc-convex-damages-fat-tails"
      ],
      "communication_gap": "Atmospheric physicists and welfare economists use incompatible frameworks and rarely co-author. IAM modelers (Nordhaus — economics Nobel 2018; Hansen — atmospheric physicist) have fundamentally different assumptions about discount rates, damage functions, and uncertainty handling. The IPCC Working Group III (mitigation economics) and Working Group I (physical science) are often written by non-overlapping author groups with limited cross-citation.\n",
      "translation_table": [
        {
          "field_a_term": "radiative forcing ΔF (atmospheric physics)",
          "field_b_term": "climate sensitivity parameter λ in damage function (economics)",
          "note": "Physical forcing determines temperature; temperature enters damage function"
        },
        {
          "field_a_term": "carbon cycle (geophysics / atmospheric chemistry)",
          "field_b_term": "emissions abatement cost curve (environmental economics)",
          "note": "Carbon cycle determines atmospheric lifetime; abatement changes trajectory"
        },
        {
          "field_a_term": "equilibrium climate sensitivity ECS (physics)",
          "field_b_term": "expected damages per degree warming (economics)",
          "note": "ECS uncertainty is the primary physical input to SCC uncertainty"
        },
        {
          "field_a_term": "global surface temperature anomaly T(t) (climate science)",
          "field_b_term": "damage function input D(T) as fraction of GDP (economics)",
          "note": "Temperature is the bridge variable between physical and economic models"
        },
        {
          "field_a_term": "radiative budget and energy balance (atmospheric physics)",
          "field_b_term": "cost-benefit analysis of mitigation vs. adaptation (economics)",
          "note": "Physical irreversibilities (tipping points) map to economic lock-in costs"
        },
        {
          "field_a_term": "tipping points / abrupt climate change (climate science)",
          "field_b_term": "fat-tailed risk distributions / Weitzman dismal theorem (economics)",
          "note": "Physical tipping points produce non-normal economic loss distributions"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.258.5086.1315",
          "note": "Nordhaus (1992) Science 258:1315 — an optimal transition path for controlling greenhouse gases"
        },
        {
          "url": "https://www.cambridge.org/core/books/economics-of-climate-change/A1E0BBF2F0ED8E2E4142A9C878052204",
          "note": "Stern (2007) The Economics of Climate Change — Cambridge University Press (Stern Review)"
        },
        {
          "doi": "10.1257/jel.47.4.703",
          "note": "Weitzman (2009) J Econ Lit 47:703 — on modeling and interpreting the economics of catastrophic climate change"
        },
        {
          "doi": "10.1086/685908",
          "note": "Howard & Sterner (2017) J Assoc Environ Resour Econ — few and not so far between"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/climate-science-economics/b-integrated-assessment-social-cost-carbon.yaml"
    },
    {
      "id": "b-diffusion-model-x-ensemble-downscaling-bias-correction",
      "title": "Diffusion generative modeling bridges stochastic denoising dynamics and ensemble climate downscaling bias correction.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy (to be empirically validated): Reverse-diffusion sampling can act as a controllable stochastic refinement operator analogous to ensemble post-processing used to downscale and debias coarse climate simulations.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-diffusion-downscaling-improves-extreme-precipitation-fidelity"
      ],
      "communication_gap": "Domain operators prioritize interpretable reliability diagnostics, while ML work often prioritizes aggregate accuracy without deployment-grade uncertainty audits.",
      "translation_table": [
        {
          "field_a_term": "model prior",
          "field_b_term": "domain prior",
          "note": "Both constrain inference in data-sparse regimes."
        },
        {
          "field_a_term": "uncertainty estimate",
          "field_b_term": "risk-aware decision support",
          "note": "Uncertainty quality determines practical utility."
        },
        {
          "field_a_term": "out-of-distribution behavior",
          "field_b_term": "deployment robustness",
          "note": "Shift sensitivity governs real-world reliability."
        }
      ],
      "references": [
        {
          "arxiv": "2006.11239",
          "note": "Denoising Diffusion Probabilistic Models."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/climate-science-machine-learning/b-diffusion-model-x-ensemble-downscaling-bias-correction.yaml"
    },
    {
      "id": "b-distributionally-robust-optimization-x-deep-uncertainty-scenario-planning",
      "title": "Distributionally robust optimization bridges ambiguity-set modeling in mathematical optimization with climate adaptation planning under deep uncertainty in forcing and impacts.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Established optimization literature formalizes worst-case or robust expectation objectives over uncertainty sets (including Wasserstein neighborhoods); speculative analogy for climate planning—ambiguity sets must encode physically plausible coupling across variables or DRO solutions become overly pe",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-wasserstein-dro-improves-tail-safe-adaptation-metrics"
      ],
      "communication_gap": "Optimization theory emphasizes tractable reformulations while climate practice emphasizes storyline scenarios that may not map cleanly to balls in Wasserstein space.",
      "translation_table": [
        {
          "field_a_term": "ambiguity radius",
          "field_b_term": "scenario ensemble spread / belief tolerance",
          "note": "Calibration differs between statistical samples and physics ensembles."
        },
        {
          "field_a_term": "robust objective value",
          "field_b_term": "precautionary budget / reserve margin",
          "note": "Decision framing differs by stakeholder accounting."
        },
        {
          "field_a_term": "dual adversarial perturbation",
          "field_b_term": "stress-test perturbation of tail risks",
          "note": "Interpretability demands domain constraints."
        }
      ],
      "references": [
        {
          "arxiv": "1710.10571",
          "note": "Principled adversarial / distributional robustness framing commonly cited in robust ML and optimization bridges."
        }
      ],
      "last_reviewed": "2026-05-09",
      "file": "cross-domain/climate-science-mathematics/b-distributionally-robust-optimization-x-deep-uncertainty-scenario-planning.yaml"
    },
    {
      "id": "b-navier-stokes-atmospheric-dynamics",
      "title": "The Navier-Stokes equations on a rotating sphere govern atmospheric and oceanic dynamics — geostrophic balance, Rossby waves, the quasi-geostrophic approximation, and turbulent energy cascade from the Kolmogorov theory are all solutions or approximations of the fundamental fluid equations that connect mathematics to weather forecasting and climate science.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Navier-Stokes equations describe fluid motion:\n\n  ρ(∂v/∂t + (v·∇)v) = -∇p + μ∇²v + F\n\nOn a rotating Earth, F includes the Coriolis force: F_Cor = -2ρΩ × v, where Ω is the Earth's angular velocity. This single mathematical framework generates all large-scale atmospheric and oceanic dynamics.\nKEY ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-geostrophic-balance-climate-change"
      ],
      "communication_gap": "Charney (1948) published the quasi-geostrophic theory in Geophysics Publications, a Norwegian journal inaccessible to the applied mathematics community. The connection between the Navier-Stokes equations and weather prediction is discussed in dynamical meteorology textbooks (Holton 2004) but not in PDE or fluid mechanics courses. The Kolmogorov turbulence cascade is covered in physics and engineering, but the distinction between 2D (atmospheric, inverse cascade) and 3D (forward cascade) turbulence is rarely taught outside of atmospheric science. The ML weather modelling revolution has created a large community of ML researchers who use atmospheric data without fluid dynamics background.\n",
      "translation_table": [
        {
          "field_a_term": "Navier-Stokes equations (rotating frame)",
          "field_b_term": "Equations of motion for atmosphere and ocean",
          "note": "With hydrostatic approximation (dw/dt ≈ 0) and Boussinesq approximation for ocean; full equations too expensive to resolve"
        },
        {
          "field_a_term": "Rossby number Ro = U/fL",
          "field_b_term": "Scale parameter distinguishing synoptic (large) from mesoscale (small) dynamics",
          "note": "Ro << 1: geostrophic balance dominates; Ro ~ 1: fronts, cyclogenesis; Ro > 1: convection, turbulence"
        },
        {
          "field_a_term": "Potential vorticity (Ertel's PV)",
          "field_b_term": "Conserved dynamical quantity in adiabatic, frictionless flow",
          "note": "PV = (ζ + f) / ρ (∂θ/∂z); PV maps are the dynamical meteorologist's primary diagnostic tool"
        },
        {
          "field_a_term": "Kolmogorov -5/3 energy spectrum",
          "field_b_term": "Atmospheric inertial-range turbulence spectrum (mesoscale)",
          "note": "Observed in aircraft measurements; switches to -3 at synoptic scales due to 2D inverse cascade"
        },
        {
          "field_a_term": "Ekman layer solution",
          "field_b_term": "Wind-driven ocean surface current (Ekman spiral)",
          "note": "Drives Ekman pumping (upwelling/downwelling) and Sverdrup balance for ocean gyre circulation"
        },
        {
          "field_a_term": "Lyapunov exponent of atmosphere",
          "field_b_term": "Practical limit of deterministic weather forecasting (~7-10 days)",
          "note": "Lorenz (1969): finite predictability is an intrinsic property of the turbulent atmosphere, not a data problem"
        },
        {
          "field_a_term": "Geostrophic balance (pressure gradient = Coriolis)",
          "field_b_term": "Why winds blow parallel to isobars; the basis of synoptic weather analysis",
          "note": "Deviation from geostrophic balance (ageostrophic wind) drives frontal dynamics and cyclone development"
        }
      ],
      "references": [
        {
          "note": "Charney (1948) Geophys Publ 17:3 — on the scale of atmospheric motions; quasi-geostrophic theory",
          "url": "https://www.ucar.edu/staff/charney/charney1948.pdf"
        },
        {
          "note": "Holton (2004) An Introduction to Dynamic Meteorology, 4th ed. (Academic Press) — standard graduate text",
          "url": "https://www.elsevier.com/books/an-introduction-to-dynamic-meteorology/holton/978-0-12-354015-7"
        },
        {
          "note": "Lorenz (1969) Tellus 21:289 — predictability: a problem partly resolved; chaos and forecast limits",
          "url": "https://onlinelibrary.wiley.com/doi/10.3402/tellusa.v21i3.10086"
        },
        {
          "doi": "10.1098/rspa.1991.0075",
          "note": "Kolmogorov (1941) — local turbulent structure at very large Reynolds numbers; -5/3 energy spectrum"
        },
        {
          "doi": "10.1126/science.adi2336",
          "note": "Lam et al. (2023) Science 382:1416 — GraphCast: learning skillful medium-range global weather forecasting"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/climate-science-mathematics/b-navier-stokes-atmospheric-dynamics.yaml"
    },
    {
      "id": "b-optimal-transport-bias-correction-x-climate-downscaling",
      "title": "Optimal-transport distribution mapping bridges mathematical transport theory and climate downscaling bias correction.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Distributional bias correction in climate projections can be framed as an optimal transport problem, preserving rank structure while aligning modeled and observed distributions. Extreme-tail transfer remains partly speculative and requires explicit caveats.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ot-bias-correction-improves-tail-risk-calibration"
      ],
      "communication_gap": "Adjacent communities use different software stacks and validation norms, so mathematically equivalent tools are often rediscovered in parallel.",
      "translation_table": [
        {
          "field_a_term": "Cost-minimizing transport plan",
          "field_b_term": "Bias-correction mapping",
          "note": "Minimizes distortion while matching empirical distributions."
        },
        {
          "field_a_term": "Wasserstein distance",
          "field_b_term": "Distribution shift diagnostic",
          "note": "Quantifies correction magnitude and residual mismatch."
        },
        {
          "field_a_term": "Monge map regularity",
          "field_b_term": "Temporal consistency of corrected series",
          "note": "Helps prevent artifact discontinuities."
        }
      ],
      "references": [
        {
          "doi": "10.1098/rsta.1922.0009",
          "note": "Fisher (1922) estimation and information."
        },
        {
          "doi": "10.1017/S0962492910000061",
          "note": "Stuart (2010) Bayesian inverse-problem foundations."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/climate-science-mathematics/b-optimal-transport-bias-correction-x-climate-downscaling.yaml"
    },
    {
      "id": "b-stochastic-climate-hasselmann",
      "title": "Hasselmann's stochastic climate theory (1976) models slow ocean temperature as a Langevin equation dT/dt = −λT + σξ(t) forced by fast atmospheric white noise, predicting a red noise power spectrum S(ω) = σ²/(λ²+ω²) that matches observed ocean variability — the same Fokker-Planck framework as Brownian motion.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Hasselmann (1976, Nobel Prize in Physics 2021) derived a stochastic theory of climate variability by separating timescales: fast atmospheric \"weather\" fluctuations act as stochastic forcing on slow ocean variables, identical to how molecular collisions force a Brownian particle.\nThe Langevin equatio",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-hasselmann-red-noise-ocean-temperature-spectrum"
      ],
      "communication_gap": "Hasselmann published in Tellus, a meteorology journal, using statistical mechanics language; Benzi et al. (1982) published stochastic resonance simultaneously in physics and climate journals. The mathematical community working on SDEs and statistical mechanics did not engage with climate science until the 1990s-2000s. The Nobel Committee (2021) explicitly cited the cross-disciplinary nature of Hasselmann's work — climate physics required statistical mechanics.\n",
      "translation_table": [
        {
          "field_a_term": "ocean surface temperature T(t)",
          "field_b_term": "position of Brownian particle x(t) in Ornstein-Uhlenbeck process",
          "note": "Ocean is the \"heavy particle\"; atmosphere is the molecular bath"
        },
        {
          "field_a_term": "damping λ (surface heat flux restoring)",
          "field_b_term": "friction coefficient in Langevin equation",
          "note": "λ ≈ 10 W/m²/K for typical ocean mixed layer; τ = 1/λ ≈ months"
        },
        {
          "field_a_term": "atmospheric weather fluctuations σξ(t)",
          "field_b_term": "white noise forcing (delta-correlated)",
          "note": "Weather decorrelates on ~10 days; ocean memory is months-centuries"
        },
        {
          "field_a_term": "red noise power spectrum S(ω) = σ²/(λ²+ω²)",
          "field_b_term": "Lorentzian spectrum of OU process — integrates white noise",
          "note": "S(ω) ∝ 1/ω² at ω >> λ; flat at ω << λ (white noise for very slow variations)"
        },
        {
          "field_a_term": "Fokker-Planck equation for P(T,t)",
          "field_b_term": "diffusion equation with drift (statistical mechanics)",
          "note": "Stationary solution: Gaussian P(T) ∝ exp(−λT²/σ²) — exactly Boltzmann distribution"
        },
        {
          "field_a_term": "stochastic resonance in glacial cycles",
          "field_b_term": "noise-induced enhancement of weak periodic signal in bistable system",
          "note": "Optimal noise amplitude ≈ barrier height / correlation time"
        }
      ],
      "references": [
        {
          "note": "Hasselmann, K. (1976) Tellus 28:473 — stochastic climate models"
        },
        {
          "note": "Benzi et al. (1982) Tellus 34:10 — stochastic resonance and glacial cycles"
        },
        {
          "note": "Frankignoul & Hasselmann (1977) Tellus 29:289 — observed SST spectrum matches red noise"
        },
        {
          "note": "Palmer & Williams (eds.) (2010) Stochastic Physics and Climate Modelling. Cambridge University Press."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/climate-science-mathematics/b-stochastic-climate-hasselmann.yaml"
    },
    {
      "id": "b-radiative-forcing-energy-balance",
      "title": "Earth's greenhouse effect is governed by the same radiative transfer physics as blackbody emission and molecular spectroscopy — CO2 forcing ΔF = 5.35 ln(C/C₀) W/m² follows directly from Beer-Lambert absorption in the 15 μm bending band, and climate sensitivity is the Planck feedback plus amplifying thermodynamic feedbacks.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Earth's energy balance is a direct application of blackbody radiation physics. Incoming solar power: S₀/4·(1−α) ≈ 240 W/m² (α ≈ 0.30 planetary albedo). Outgoing longwave radiation: σT_eff⁴ where T_eff ≈ 255 K (effective emission temperature). The greenhouse effect arises because CO₂ absorbs and re-e",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-climate-sensitivity-emergent-constraint-water-vapor"
      ],
      "communication_gap": "Climate scientists use forcing and feedback language developed for policy communication, while physicists use spectroscopic language (transition moments, line strengths, pressure broadening). The mathematical connection — that ΔF = 5.35 ln(C/C₀) is a Taylor expansion of the Beer-Lambert law integrated over the HITRAN spectrum — is not taught in either physics or climate courses. Journal siloing between Journal of Geophysical Research (Atmospheres) and Journal of Quantitative Spectroscopy & Radiative Transfer has maintained the gap despite the two fields sharing fundamental physics.\n",
      "translation_table": [
        {
          "field_a_term": "CO2 greenhouse effect",
          "field_b_term": "Beer-Lambert absorption in 15 μm ν₂ bending band",
          "note": "Same molecular physics; forcing is logarithmic because wings broaden as band saturates"
        },
        {
          "field_a_term": "radiative forcing ΔF = 5.35 ln(C/C₀) W/m²",
          "field_b_term": "integrated line absorption in pressure-broadened Lorentzian spectrum",
          "note": "The coefficient 5.35 W/m² is an empirical fit to line-by-line radiative transfer calculations"
        },
        {
          "field_a_term": "Planck feedback (−3.2 W/m²/K)",
          "field_b_term": "Stefan-Boltzmann derivative: −dσT⁴/dT = −4σT³ ≈ −3.2 W/m²/K at T=255 K",
          "note": "Exact blackbody physics; not a parameterization but a first-principles result"
        },
        {
          "field_a_term": "water vapor feedback (+1.8 W/m²/K)",
          "field_b_term": "Clausius-Clapeyron: d(ln p_sat)/dT = L/RT² ≈ 7%/K at 280 K",
          "note": "Warmer air holds more water vapor, amplifying the greenhouse effect"
        },
        {
          "field_a_term": "equilibrium climate sensitivity (ECS)",
          "field_b_term": "inverse feedback parameter lambda = 1/(lambda_0_inv - f) where f = sum of feedbacks"
        },
        {
          "field_a_term": "effective emission temperature T_eff = 255 K",
          "field_b_term": "blackbody temperature at which σT⁴ = absorbed solar flux",
          "note": "Difference from surface T ≈ 288 K (33 K) is the greenhouse warming"
        }
      ],
      "references": [
        {
          "doi": "10.1029/98GL01908",
          "note": "Myhre et al. (1998) Geophys Res Lett 25:2715 — ΔF = 5.35 ln(C/C₀) formula"
        },
        {
          "doi": "10.1175/1520-0469(1967)024<0241:TEOTAW>2.0.CO;2",
          "note": "Manabe & Wetherald (1967) J Atmos Sci 24:241 — first GCM with water vapor feedback"
        },
        {
          "doi": "10.1029/2019RG000678",
          "note": "Sherwood et al. (2020) Rev Geophys 58:e2019RG000678 — ECS assessment, 2.6–3.9 K likely range"
        },
        {
          "note": "IPCC AR6 WGI (2021) Chapter 7 — Energy budget and climate feedbacks"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/climate-science-physics/b-radiative-forcing-energy-balance.yaml"
    },
    {
      "id": "b-urban-heat-islands-energy-balance",
      "title": "Urban heat islands arise from the surface energy balance equation: Q* = QH + QE + QG where reduced QE (latent heat from evapotranspiration) increases QH (sensible heat), raising urban air temperature 1-8°C above rural areas",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The urban surface energy balance (SEB) partitions net radiation Q* into latent heat flux QE (evapotranspiration), sensible heat flux QH (heating air), and ground heat flux QG: Q* = QH + QE + QG + QA where QA is anthropogenic heat (A/C, vehicles). The urban heat island (UHI) magnitude ΔT = T_urban - ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-urban-heat-islands-energy-balance"
      ],
      "communication_gap": "Urban planners and city governments who address UHI through green infrastructure and cool pavements rarely connect to the SEB physics that explains why these interventions work quantitatively. Atmospheric physicists who model urban energy balance publish in boundary-layer meteorology journals with limited reach to urban planning and public health communities who need the results.\n",
      "translation_table": [
        {
          "field_a_term": "urban surface albedo α (fraction of solar radiation reflected)",
          "field_b_term": "determines Q* = S(1-α) + L_down - L_up = net radiation",
          "note": "Urban α~0.1-0.15 (dark asphalt) vs rural α~0.2-0.25 (vegetation); Q* higher in cities"
        },
        {
          "field_a_term": "Bowen ratio β = QH/QE",
          "field_b_term": "energy balance partitioning parameter — high β means energy goes to heating air",
          "note": "Cool roofs/green roofs reduce β by restoring evaporative cooling component QE"
        },
        {
          "field_a_term": "urban canyon sky view factor Ψ_sky",
          "field_b_term": "geometric fraction of sky visible from street level; determines longwave trapping",
          "note": "Ψ_sky ~ 0.3-0.7 in urban canyons; reduces longwave emission at night, raising night temperature"
        },
        {
          "field_a_term": "thermal effusivity of urban materials e = √(kρc_p)",
          "field_b_term": "material property controlling ground heat storage QG",
          "note": "Concrete e~1800 J·m⁻²·K⁻¹·s⁻¹/² vs soil e~400 — cities store ~4x more daytime heat"
        }
      ],
      "references": [
        {
          "doi": "10.1175/1520-0450(1982)021<1553:SSBUFB>2.0.CO;2",
          "note": "Oke (1982) The energetic basis of the urban heat island. Q J Roy Meteorol Soc 108:1"
        },
        {
          "doi": "10.1002/joc.1813",
          "note": "Stewart & Oke (2012) Local climate zones for urban ecosystem studies. Bull Am Meteorol Soc 93:1879"
        },
        {
          "doi": "10.1175/BAMS-D-12-00013.1",
          "note": "Grimmond et al. (2010) The international urban energy balance models comparison project. J Appl Meteorol"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/climate-science-physics/b-urban-heat-islands-energy-balance.yaml"
    },
    {
      "id": "b-change-point-bayesian-online-detection-x-glacier-calving-regime-shifts",
      "title": "Bayesian online change-point detection links streaming anomaly methods to glacier calving regime-shift monitoring.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Glacier calving intensity time series can be monitored with Bayesian online change-point detection to detect regime transitions earlier than fixed-threshold heuristics.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-bocd-with-hazard-adaptation-detects-glacier-regime-shifts-earlier"
      ],
      "communication_gap": "Communities use different terminology and validation conventions, masking transferable method equivalence.",
      "translation_table": [],
      "references": [
        {
          "url": "https://arxiv.org/abs/0710.3742",
          "note": "Adams and MacKay BOCPD method."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/climate-science-statistics/b-change-point-bayesian-online-detection-x-glacier-calving-regime-shifts.yaml"
    },
    {
      "id": "b-kalman-smoothing-x-tree-ring-paleoclimate-reconstruction",
      "title": "State-space Kalman smoothing unifies noisy proxy assimilation and tree-ring paleoclimate reconstruction.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Tree-ring proxy calibration can be framed as latent-state smoothing where growth observations are noisy sensors of climate states, enabling shared uncertainty diagnostics between reanalysis and paleoclimate pipelines.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-kalman-smoother-outperforms-static-regression-for-tree-ring-temperature"
      ],
      "communication_gap": "Communities use different terminology and validation conventions, masking transferable method equivalence.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1115/1.3662552",
          "note": "Kalman (1960) filtering foundations."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/climate-science-statistics/b-kalman-smoothing-x-tree-ring-paleoclimate-reconstruction.yaml"
    },
    {
      "id": "b-efficient-coding-perception",
      "title": "The efficient coding hypothesis (Barlow 1961) unifies sensory neuroscience and information theory: retinal whitening, V1 Gabor receptive fields, and auditory log-frequency tuning all follow from maximizing Shannon information transmission per unit metabolic cost.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Barlow (1961) proposed that the goal of sensory processing is to represent the environment using the minimum number of active neurons — equivalently, to maximize the Shannon mutual information I(stimulus; neural response) subject to a metabolic cost constraint. This efficient coding hypothesis makes",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-v1-gabor-infomax-prediction",
        "h-surprisal-n400-mismatch-equivalence"
      ],
      "communication_gap": "Barlow published in Sensory Communication (MIT Press, 1961) — read by physiologists but not by information theorists. Shannon's work was read by engineers. Olshausen & Field (1996) appeared in Nature and explicitly bridged the communities, but their result is still not standard curriculum in neuroscience programs, which typically teach Gabor filters as a phenomenological description rather than as the output of an information-theoretic optimization. The mathematics required (ICA, information bottleneck, natural scene statistics) sits across the neuroscience/statistics disciplinary boundary.\n",
      "translation_table": [
        {
          "field_a_term": "Shannon channel capacity C = max I(X;Y)",
          "field_b_term": "Optimal neural population code efficiency",
          "note": "Sensory neurons are hypothesised to operate near channel capacity for natural inputs"
        },
        {
          "field_a_term": "Redundancy reduction (removing statistical dependencies)",
          "field_b_term": "Lateral inhibition in retina and cortex",
          "note": "Centre-surround receptive fields decorrelate spatially redundant signals"
        },
        {
          "field_a_term": "Independent component analysis (ICA) of natural images",
          "field_b_term": "Gabor wavelets = V1 simple cell receptive fields",
          "note": "Olshausen & Field (1996), Bell & Sejnowski (1997) — learning confirms prediction"
        },
        {
          "field_a_term": "Minimum description length (MDL) / sparse code",
          "field_b_term": "Sparse neural firing (few active neurons at a time)",
          "note": "Metabolic cost = code length; sparseness = compression"
        },
        {
          "field_a_term": "Information bottleneck trade-off β·I(T;Y) - I(T;X)",
          "field_b_term": "Compression-relevance hierarchy from V1 to IT cortex",
          "note": "Each visual area discards irrelevant information while preserving task-relevant structure"
        },
        {
          "field_a_term": "Power spectrum of natural images: S(f) ~ 1/f²",
          "field_b_term": "Whitening filter = centre-surround antagonism",
          "note": "The retina's filter is the Wiener filter for the natural image ensemble"
        },
        {
          "field_a_term": "Logarithmic frequency axis (equal-ratio bands)",
          "field_b_term": "Cochlear tonotopic map (mel / bark scale)",
          "note": "Log spacing maximises information about 1/f natural sounds"
        }
      ],
      "references": [
        {
          "note": "Barlow (1961) 'Possible principles underlying the transformation of sensory messages' in Sensory Communication (MIT Press) — original efficient coding hypothesis"
        },
        {
          "doi": "10.1038/381607a0",
          "note": "Olshausen & Field (1996) Nature 381:607 — sparse coding of natural images yields V1-like Gabor basis"
        },
        {
          "doi": "10.1016/S0042-6989(97)00169-7",
          "note": "Bell & Sejnowski (1997) Vision Research 37:3327 — ICA of natural images yields Gabor filters"
        },
        {
          "arxiv": "physics/0004057",
          "note": "Tishby, Pereira & Bialek (2000) — information bottleneck principle"
        },
        {
          "doi": "10.1162/neco.1992.4.2.196",
          "note": "Atick & Redlich (1992) Neural Comput 4:196 — retina as Wiener filter for natural images"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/cognitive-science-information/b-efficient-coding-perception.yaml"
    },
    {
      "id": "b-embodied-cognition-conceptual-metaphor",
      "title": "Lakoff and Johnson's conceptual metaphor theory (MORE IS UP, ARGUMENT IS WAR) is grounded in embodied cognition — abstract concepts recruit sensorimotor cortex because they are structured by bodily experience, bridging linguistic structure to neural substrate to bodily interaction with the physical world.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "CONCEPTUAL METAPHOR (Lakoff & Johnson 1980): Abstract concepts are structured by concrete bodily experience: - MORE IS UP: \"prices are rising\", \"spirits lifted\", \"high hopes\" - ARGUMENT IS WAR: \"attack a position\", \"demolish an argument\", \"defend a claim\" - TIME IS SPACE: \"looking forward to the fut",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-embodied-simulation-language-comprehension-graded"
      ],
      "communication_gap": "Linguistic semanticists and cognitive neuroscientists work in separate communities despite deep connections. The original Lakoff-Johnson theory was linguistic-conceptual; the neural grounding was added later by Gallese, Rizzolatti, and Bergen. Formal semanticists (Montague grammar, model-theoretic semantics) largely reject the embodied approach.\n",
      "translation_table": [
        {
          "field_a_term": "conceptual metaphor schema (linguistics)",
          "field_b_term": "neural simulation in sensorimotor cortex (neuroscience)",
          "note": "Each conceptual metaphor predicts specific sensorimotor cortex activation during abstract language comprehension"
        },
        {
          "field_a_term": "image schema (e.g. CONTAINER, FORCE, BALANCE) (linguistics)",
          "field_b_term": "affordance representation (cognitive science / perception)",
          "note": "Gibson's affordances and Lakoff's image schemas both capture body-environment interaction structure"
        },
        {
          "field_a_term": "SOURCE-PATH-GOAL schema (linguistics)",
          "field_b_term": "motor planning: initial state, trajectory, goal state (neuroscience)",
          "note": "Linguistic path structure mirrors motor action sequence structure — same neural system"
        },
        {
          "field_a_term": "primary metaphor (direct sensorimotor grounding)",
          "field_b_term": "conditioned stimulus-response association (learning theory)",
          "note": "Primary metaphors (AFFECTION IS WARMTH) formed by co-occurrence of physical sensation and abstract concept in development"
        }
      ],
      "references": [
        {
          "note": "Lakoff & Johnson (1980) Metaphors We Live By, University of Chicago Press"
        },
        {
          "doi": "10.1515/cogl.2005.16.3.455",
          "note": "Gallese & Lakoff (2005) Cogn Linguist 16:455 — neural mirror system and conceptual metaphor"
        },
        {
          "doi": "10.3758/BF03196313",
          "note": "Glenberg & Kaschak (2002) Psychon Bull Rev 9:558 — action-sentence compatibility"
        },
        {
          "note": "Bergen (2012) Louder Than Words, Basic Books — embodied simulation review"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/cognitive-science-linguistics/b-embodied-cognition-conceptual-metaphor.yaml"
    },
    {
      "id": "b-semantic-memory-word-vectors",
      "title": "Distributional semantic models (word2vec, GloVe) produce vector representations that predict human semantic similarity judgments, priming latencies, and neural activation patterns in inferior temporal cortex, formalizing the distributional hypothesis of meaning",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The cosine similarity between word vectors trained on large corpora predicts human semantic similarity ratings (Pearson r ~ 0.8) and word association norms, because both reflect the co-occurrence statistics of words in natural language, implementing the distributional hypothesis: words occurring in ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Cognitive scientists study semantic memory through behavioral experiments and brain imaging while NLP researchers develop distributional models for downstream applications; the theoretical connection between co-occurrence statistics and cognitive semantic organization is acknowledged but rarely integrated into either neural or computational models of meaning.",
      "translation_table": [
        {
          "field_a_term": "semantic memory network (human mental lexicon)",
          "field_b_term": "high-dimensional vector space where word vectors cluster by meaning",
          "note": "Both represent words by their relationships to other words; similarity structure is the primary organizing principle"
        },
        {
          "field_a_term": "semantic priming (faster RT for semantically related word pairs)",
          "field_b_term": "high cosine similarity between prime and target word vectors",
          "note": "Priming effect magnitude correlates with vector cosine similarity across word pairs"
        },
        {
          "field_a_term": "semantic category membership (dog is a mammal)",
          "field_b_term": "vector arithmetic: king - man + woman = queen",
          "note": "Linear vector offsets capture semantic relations (gender, country-capital, species-genus)"
        },
        {
          "field_a_term": "anterior temporal lobe (ATL) semantic hub",
          "field_b_term": "RSA correlation between neural activation patterns and word vector similarity",
          "note": "fMRI MVPA shows ATL geometry matches word vector geometry (Huth et al. 2016)"
        }
      ],
      "references": [
        {
          "doi": "10.48550/arXiv.1301.3781",
          "note": "Mikolov et al. (2013) - word2vec: efficient estimation of word representations in vector space"
        },
        {
          "doi": "10.1038/nn.4012",
          "note": "Huth et al. (2016) Nature Neurosci - natural speech reveals semantic brain maps matching distributional semantics"
        },
        {
          "doi": "10.1037/0033-295X.114.2.211",
          "note": "Landauer & Dumais (1997) Psych Rev - latent semantic analysis and distributional learning"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cognitive-science-linguistics/b-semantic-memory-word-vectors.yaml"
    },
    {
      "id": "b-childhood-learning-bayesian-concept-acquisition",
      "title": "Children acquire concepts and causal rules with remarkable speed and generalization from sparse data, a phenomenon explained by Bayesian concept learning — probabilistic inference over hypothesis spaces with strong structural priors, bridging cognitive science and Bayesian statistics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Tenenbaum & Griffiths (2001) showed that human concept learning matches Bayesian inference: given n positive examples of a concept, the learner infers the most probable hypothesis h by computing P(h|data) ∝ P(data|h)·P(h). The likelihood P(data|h) = (1/|h|)^n (size principle — smaller hypotheses are",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-rational-constructivism-bayesian-development"
      ],
      "communication_gap": "Developmental psychologists studying concept acquisition and Bayesian statisticians rarely collaborate directly; Bayesian cognitive science (Tenenbaum, Griffiths, Gopnik) has grown substantially but remains distinct from mainstream developmental psychology and is largely unknown to educational practitioners.\n",
      "translation_table": [
        {
          "field_a_term": "concept generalization from examples (cognitive science)",
          "field_b_term": "Bayesian posterior over hypothesis space (mathematics)",
          "note": "A child generalizes a word to new objects — Bayesian update of P(concept|examples)"
        },
        {
          "field_a_term": "size principle in concept learning (cognitive science)",
          "field_b_term": "likelihood ratio favoring specific over general hypotheses (mathematics)",
          "note": "Smaller concept extension has higher likelihood per example — the size principle"
        },
        {
          "field_a_term": "prior knowledge / core knowledge system (cognitive science)",
          "field_b_term": "prior probability distribution P(h) (mathematics)",
          "note": "Innate core knowledge corresponds to a strong structural prior over hypothesis space"
        },
        {
          "field_a_term": "one-shot learning / fast mapping (cognitive science)",
          "field_b_term": "Bayesian inference with informative prior (mathematics)",
          "note": "Strong prior + single example yields a confident posterior — explaining fast mapping"
        }
      ],
      "references": [
        {
          "doi": "10.1037/0033-295X.108.3.450",
          "note": "Tenenbaum & Griffiths (2001) — generalization, similarity, and Bayesian inference"
        },
        {
          "doi": "10.1037/0033-295X.116.2.246",
          "note": "Xu & Tenenbaum (2007) — word learning as Bayesian inference"
        },
        {
          "doi": "10.1037/0033-295X.116.4.961",
          "note": "Goodman et al. (2011) — rational analysis of rule-based concept learning"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cognitive-science-mathematics/b-childhood-learning-bayesian-concept-acquisition.yaml"
    },
    {
      "id": "b-free-energy-principle-stat-mech",
      "title": "Friston's free energy principle — biological systems minimise variational free energy F = E_q[log q(s) − log p(s,o)] — is formally identical to variational inference in machine learning and to Helmholtz free energy in thermodynamics, unifying perception, action, homeostasis, and learning.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Friston (2010) proposed that all biological self-organisation can be understood as the minimisation of variational free energy F, where:\n\n  F = E_q[log q(s)] − E_q[log p(s,o)]\n    = KL[q(s) || p(s|o)] − log p(o)\n\nHere q(s) is the organism's internal model of hidden states s, p(s,o) is a generative m",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-free-energy-aging"
      ],
      "communication_gap": "Friston's work is published primarily in neuroscience journals (Nature Reviews Neuroscience, Journal of Neuroscience) and is cited mainly within that field. Thermodynamicists and statistical physicists are largely unaware of the free energy principle. Machine learning researchers know variational inference (ELBO) but the cognitive neuroscience literature uses different notation. The philosophical framing (\"everything is free energy minimisation\") alienates physicists who prefer minimal formal claims. Beal's (2003) PhD thesis making the variational Bayes / Helmholtz connection explicit is widely cited in ML but rarely by neuroscientists.\n",
      "translation_table": [
        {
          "field_a_term": "Variational free energy F (cognitive science)",
          "field_b_term": "Helmholtz free energy A = U − TS (thermodynamics)",
          "note": "Both are upper bounds on surprise/non-equilibrium free energy; minimisation drives the system"
        },
        {
          "field_a_term": "KL-divergence KL[q || p] (complexity penalty)",
          "field_b_term": "Entropy cost −TS in free energy",
          "note": "Both penalise deviation from equilibrium/prior; information-geometric equivalent"
        },
        {
          "field_a_term": "Accuracy −E_q[log p(o|s)] (model fit)",
          "field_b_term": "Internal energy U (energy of current state)",
          "note": "Both measure the cost of the current configuration"
        },
        {
          "field_a_term": "Perception (inference about hidden states)",
          "field_b_term": "Equilibration (relaxation to minimum free energy state)",
          "note": "Both correspond to gradient descent on free energy w.r.t. internal variables"
        },
        {
          "field_a_term": "Action (changing the world to match predictions)",
          "field_b_term": "Work (changing external state to reduce free energy)",
          "note": "Both are free-energy-minimising operations on the environment"
        },
        {
          "field_a_term": "Generative model p(s,o)",
          "field_b_term": "Hamiltonian H(x) defining the energy landscape",
          "note": "Both specify the structure of the free energy functional"
        },
        {
          "field_a_term": "Active inference (minimise F by acting)",
          "field_b_term": "Dissipative structure (maintain order by exporting entropy)",
          "note": "Prigogine: living systems maintain low entropy by coupling to environment"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nrn2787",
          "note": "Friston (2010) Nat Rev Neurosci 11:127 — free energy principle review"
        },
        {
          "note": "Helmholtz (1882) Wissenschaftliche Abhandlungen, Vol. 2 — free energy formulation"
        },
        {
          "note": "Beal (2003) Variational Algorithms for Approximate Bayesian Inference, PhD thesis, UCL"
        },
        {
          "note": "Parr, Pezzulo & Friston (2022) Active Inference: The Free Energy Principle in Mind, Brain, and Behavior (MIT Press)"
        },
        {
          "doi": "10.48550/arXiv.1312.6114",
          "note": "Kingma & Welling (2013) Auto-Encoding Variational Bayes — ELBO / VAE formulation"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/cognitive-science-physics/b-free-energy-principle-stat-mech.yaml"
    },
    {
      "id": "b-collective-memory-distributed-cognition",
      "title": "Collective memory in social groups emerges from distributed cognitive processes across individuals and artifacts, bridging cognitive science and social science through the theory of extended and distributed cognition.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Edwin Hutchins' distributed cognition framework shows that cognitive processes including memory extend beyond individual brains to encompass social networks and material artifacts; collective memory (Halbwachs) is thus a distributed cognitive system where recall accuracy is determined by the structu",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-transactive-memory-network-topology-performance"
      ],
      "communication_gap": "Cognitive psychologists study individual memory mechanisms while sociologists and historians study collective memory empirically and theoretically; Hutchins' distributed cognition framework explicitly bridges these but is less known in sociology than in cognitive science, and quantitative models of collective memory using network theory are rare.\n",
      "translation_table": [
        {
          "field_a_term": "working memory / long-term memory (cognitive science)",
          "field_b_term": "individual and collective social memory (social science)",
          "note": "Halbwachs' collective memory theory extends individual memory concepts to group level"
        },
        {
          "field_a_term": "cognitive schema / mental model (cognitive science)",
          "field_b_term": "cultural narrative / collective representation (social science)",
          "note": "Shared schemas constitute the collective cognitive framework for encoding social memory"
        },
        {
          "field_a_term": "memory consolidation / reconsolidation (cognitive science)",
          "field_b_term": "revision of historical narratives / commemorative practices (social science)",
          "note": "Social memory is reconstructed at each retrieval, susceptible to present-day biases"
        },
        {
          "field_a_term": "distributed representation in a neural network (cognitive science)",
          "field_b_term": "transactive memory system in a social group (social science)",
          "note": "Transactive memory distributes storage across group members analogous to distributed neural coding"
        }
      ],
      "references": [
        {
          "doi": "10.1017/CBO9780511626562",
          "note": "Hutchins (1995) Cognition in the Wild - distributed cognition in ship navigation"
        },
        {
          "doi": "10.1126/science.aat7663",
          "note": "Coman et al. (2016) - mnemonic convergence in social networks"
        },
        {
          "doi": "10.1037/0022-3514.61.6.923",
          "note": "Wegner (1987) - transactive memory: a contemporary analysis of the group mind"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cognitive-science-social-science/b-collective-memory-distributed-cognition.yaml"
    },
    {
      "id": "b-evolutionary-algorithms-natural-computation",
      "title": "Genetic algorithms and evolutionary strategies are computational implementations of Darwinian evolution — variation-selection-inheritance applied to candidate solutions — with formal equivalences to Fisher's fundamental theorem and population genetics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Holland's genetic algorithm (1975) implements natural selection on populations of candidate solutions: selection (fitness proportionate reproduction), crossover (genetic recombination), and mutation (random bit flips). The schema theorem states that short, above-average schemata (building blocks) gr",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-schema-theorem-replicator-equivalence"
      ],
      "communication_gap": "Computer science evolutionary algorithm courses rarely engage with theoretical population genetics; evolutionary biology courses rarely mention genetic algorithms. The formal equivalence is known in evolutionary computation but not widely taught as a two-way bridge that enriches both fields.\n",
      "translation_table": [
        {
          "field_a_term": "candidate solution (chromosome)",
          "field_b_term": "organism genotype",
          "note": "Encoded representation of a solution; both subject to selection and variation"
        },
        {
          "field_a_term": "fitness function",
          "field_b_term": "reproductive success",
          "note": "Objective function in optimisation = Darwinian fitness; both determine propagation rate"
        },
        {
          "field_a_term": "crossover (two-parent recombination)",
          "field_b_term": "sexual recombination / meiosis",
          "note": "Combines building blocks from two parents; disrupts tight schemata"
        },
        {
          "field_a_term": "schema theorem",
          "field_b_term": "Fisher's fundamental theorem of natural selection",
          "note": "Both state that above-average variants grow at a rate proportional to their excess fitness"
        },
        {
          "field_a_term": "mutation rate",
          "field_b_term": "genomic mutation rate per generation",
          "note": "Optimal mutation rate (1/L for binary strings) parallels Eigen's error threshold in molecular evolution"
        }
      ],
      "references": [
        {
          "note": "Holland, J.H. (1975). Adaptation in Natural and Artificial Systems. University of Michigan Press."
        },
        {
          "note": "Goldberg, D.E. (1989). Genetic Algorithms in Search, Optimization, and Machine Learning. Addison-Wesley."
        },
        {
          "doi": "10.1109/4235.585893",
          "note": "Wolpert, D.H. & Macready, W.G. (1997). No free lunch theorems for optimization. IEEE Trans Evol Comput 1:67."
        },
        {
          "doi": "10.1162/106365602320169811",
          "note": "Stanley, K.O. & Miikkulainen, R. (2002). Evolving neural networks through augmenting topologies. Evol Comput 10:99."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/computer-science-biology/b-evolutionary-algorithms-natural-computation.yaml"
    },
    {
      "id": "b-reinforcement-learning-x-foraging-patch-models",
      "title": "Patch-foraging theory (leave-time optimization via marginal value theorem) parallels reinforcement-learning analyses of exploration versus exploitation in MDPs with episodic resource patches — patch residence policies resemble softmax or ε-greedy action policies under hazard-shaped rewards — linking ecology field studies with RL sample-efficiency benchmarks when environments embed latent patch quality.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Charnov’s marginal value theorem predicts optimal patch departure when instantaneous intake falls below landscape-average reward rate — analogous to threshold stopping rules in restless bandits. Q-learning and actor–critic algorithms optimize discounted or average rewards while balancing exploration",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-reinforcement-learning-x-foraging-patch-models"
      ],
      "communication_gap": "Ecology textbooks cite Charnov yet seldom formalize Bellman equations explicitly; RL tutorials emphasize MDP notation without citing empirical marginal-value datasets from hummingbirds or bees carrying centuries of field biology credibility.\n",
      "translation_table": [
        {
          "field_a_term": "Marginal value threshold for leaving patch (ecology)",
          "field_b_term": "Stopping-time rule comparing continuation value vs outside option (RL/bandits)",
          "note": "Same optimal stopping structure under hazard-discounted rewards when formulated carefully."
        },
        {
          "field_a_term": "Travel time between patches",
          "field_b_term": "Mean revisit interval / MDP timestep penalties shaping exploration budgets",
          "note": "Travel costs mirror RL exploration penalties shaping regret bounds only indirectly via topology."
        },
        {
          "field_a_term": "Environmental stochasticity in prey renewal",
          "field_b_term": "Stochastic transitions P(s'|s,a) (RL)",
          "note": "Shared partially observed renewal assumptions motivate POMDP reductions for foragers with sensory limits."
        }
      ],
      "references": [
        {
          "doi": "10.1086/282878",
          "note": "Charnov (1976) Am. Nat. — marginal value theorem optimal patch use"
        },
        {
          "doi": "10.7551/mitpress/6737.001.0001",
          "note": "Sutton & Barto — Reinforcement Learning: An Introduction (MDP policy framing)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/computer-science-ecology/b-reinforcement-learning-x-foraging-patch-models.yaml"
    },
    {
      "id": "b-algorithmic-game-theory-internet",
      "title": "Algorithmic game theory analyses internet protocols, ad auctions, and platform economics as games with strategic self-interested agents — computing Nash equilibria for BGP routing, quantifying the price of anarchy for selfish routing, and implementing Vickrey-Clarke-Groves mechanisms at planetary scale in sponsored search auctions.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "CLASSICAL PROBLEM: Internet protocols (BGP routing, TCP congestion control) are designed for cooperative agents, but actual Internet is composed of self-interested autonomous systems (ASes) that may deviate for competitive advantage.\nPRICE OF ANARCHY (Koutsoupias & Papadimitriou 1999):\n  PoA = max_N",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-price-of-anarchy-internet-routing-empirical"
      ],
      "communication_gap": "Computer scientists and economists developed game-theoretic analysis of the Internet largely independently in the early 2000s. The two communities now interact through venues like ACM EC and WWW, but undergraduate CS programs rarely teach mechanism design and economics programs rarely teach computational complexity.\n",
      "translation_table": [
        {
          "field_a_term": "Nash equilibrium (game theory)",
          "field_b_term": "stable routing state / BGP convergence (networking)",
          "note": "BGP converges iff the routing game has a Nash equilibrium reachable by best-response dynamics"
        },
        {
          "field_a_term": "Price of Anarchy (game theory)",
          "field_b_term": "efficiency loss from selfish routing vs. traffic engineering",
          "note": "PoA bounds how much worse Internet routing is than centrally optimised routing"
        },
        {
          "field_a_term": "VCG mechanism (mechanism design)",
          "field_b_term": "second-price sealed-bid ad auction (internet advertising)",
          "note": "GSP auction in sponsored search is VCG-like; theoretical analysis predicts approximately-truthful equilibria"
        },
        {
          "field_a_term": "dominant strategy equilibrium (game theory)",
          "field_b_term": "incentive-compatible protocol (computer science)",
          "note": "A protocol is incentive-compatible if honest participation is a dominant strategy — the computational counterpart of dominant strategy Nash equilibrium"
        }
      ],
      "references": [
        {
          "note": "Nisan et al. (2007) Algorithmic Game Theory, Cambridge UP"
        },
        {
          "doi": "10.1007/3-540-48413-4_58",
          "note": "Koutsoupias & Papadimitriou (1999) STACS — original price of anarchy"
        },
        {
          "note": "Varian (2007) J Econ Perspect 21:191 — Google ad auction mechanism"
        },
        {
          "note": "Roughgarden (2002) Proceedings 34th STOC — tight PoA bounds for selfish routing"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/computer-science-economics/b-algorithmic-game-theory-internet.yaml"
    },
    {
      "id": "b-approximation-algorithms-sdp",
      "title": "Semidefinite programming (SDP) relaxation provides the tightest tractable approximation for NP-hard combinatorial optimization problems: Goemans- Williamson MAX-CUT achieves a 0.878-approximation ratio (optimal under the Unique Games Conjecture) by relaxing binary variables to unit vectors on the semidefinite cone, with the Lovász theta function providing tight bounds on graph independence number and chromatic number.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "SDP generalizes linear programming: minimize Tr(CX) subject to linear matrix inequalities A_i·X = b_i and X ≽ 0 (positive semidefinite). X ≽ 0 replaces the linear constraint x_i ∈ [0,1] (LP relaxation) with the stronger constraint that X is a Gram matrix of unit vectors — X_ij = v_i·v_j where ||v_i|",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-sdp-rounding-universal-approximation-ratio-tight-ugc"
      ],
      "communication_gap": "The Goemans-Williamson result (1995) is central to theoretical computer science but largely unknown in the applied mathematics community that uses SDP for control (LMI conditions), communications (MIMO detection), and finance (portfolio optimization). The connection between GW rounding and SDP theory is understood by complexity theorists but rarely appears in optimization textbooks used by engineers. The Unique Games Conjecture remains one of the most consequential open problems in mathematics but is unknown outside TCS.\n",
      "translation_table": [
        {
          "field_a_term": "binary variable x_i ∈ {-1,+1} (integer program)",
          "field_b_term": "unit vector v_i ∈ S^{n-1} (SDP relaxation)",
          "note": "The \"rounding\" step maps the continuous SDP solution back to discrete binary values"
        },
        {
          "field_a_term": "adjacency matrix A_ij of graph G",
          "field_b_term": "correlation matrix X_ij = v_i·v_j in SDP",
          "note": "Negative X_ij corresponds to opposite sides of the cut — vectors push apart"
        },
        {
          "field_a_term": "random hyperplane rounding (GW algorithm)",
          "field_b_term": "probabilistic rounding function achieving approximation ratio guarantee",
          "note": "arccos(ρ)/π ≥ 0.878·(1-ρ)/2 for all ρ ∈ [-1,1] — tight near ρ = -1"
        },
        {
          "field_a_term": "Unique Games Conjecture (Khot 2002)",
          "field_b_term": "hardness of approximation certification — SDP rounding is the best possible",
          "note": "UGC implies GW is tight; UGC itself remains unproven (major open problem)"
        },
        {
          "field_a_term": "Lovász theta ϑ(G) (computable SDP)",
          "field_b_term": "polytime certificate bounding NP-hard graph quantities",
          "note": "ϑ(G) = max-weight independent set in a semidefinite sense; equals Shannon capacity for Petersen graph"
        }
      ],
      "references": [
        {
          "doi": "10.1145/227683.227684",
          "note": "Goemans & Williamson (1995) Improved approximation algorithms for maximum cut and satisfiability problems using semidefinite programming. J ACM 42:1115"
        },
        {
          "doi": "10.1145/509907.510016",
          "note": "Khot (2002) On the power of unique 2-prover 1-round games. Proc 34th STOC:767"
        },
        {
          "doi": "10.1109/TIT.1979.1056011",
          "note": "Lovász (1979) On the Shannon capacity of a graph. IEEE Trans Inf Theory 25:1"
        },
        {
          "doi": "10.1137/1038003",
          "note": "Vandenberghe & Boyd (1996) Semidefinite programming. SIAM Rev 38:49"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/computer-science-mathematics/b-approximation-algorithms-sdp.yaml"
    },
    {
      "id": "b-cellular-automata-computational-universality",
      "title": "Cellular automata with simple local rules can achieve computational universality (Turing completeness), demonstrated by Conway's Game of Life and Wolfram's Rule 110, connecting discrete dynamical systems to computability theory through the mathematical equivalence of local state-update rules to universal Turing machine tape operations",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A cellular automaton is computationally universal if it can simulate any Turing machine: Wolfram's Rule 110 (a 1D elementary CA) is Turing complete (Cook, 2004), and Conway's Game of Life implements logic gates and memory via glider/eater/still-life patterns; the universality proof constructs a Turi",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Computer scientists study complexity and computability theoretically while mathematicians and physicists study cellular automata as discrete dynamical systems; the universality theorems connecting local dynamics to global computability are well-known in theoretical computer science but poorly integrated with dynamical systems analysis of CA behavior.",
      "translation_table": [
        {
          "field_a_term": "cellular automaton local update rule f: S^N → S (computer science)",
          "field_b_term": "transition function of a Turing machine (mathematics)",
          "note": "Local CA rule is O(|S|^(2r+1)) table; universal CAs embed TM transition function in complex global dynamics"
        },
        {
          "field_a_term": "glider in Game of Life (computer science)",
          "field_b_term": "moving signal carrying information through the tape (mathematics)",
          "note": "Gliders perform the role of signal wires; glider guns are clocked oscillators; collisions implement logic gates"
        },
        {
          "field_a_term": "Rule 110 elementary CA (computer science)",
          "field_b_term": "2-symbol, 1-tape Turing machine simulation (mathematics)",
          "note": "Cook (2004) proved Rule 110 Turing complete using tag system intermediate encoding"
        },
        {
          "field_a_term": "complex behavior in Wolfram's Class IV CAs (computer science)",
          "field_b_term": "undecidability of prediction (halting problem analog) (mathematics)",
          "note": "Class IV CAs exhibit computational irreducibility: no shortcut exists to predict long-term evolution"
        }
      ],
      "references": [
        {
          "doi": "10.1007/978-3-540-92910-9_14",
          "note": "Cook (2004) - proof that Rule 110 cellular automaton is Turing complete"
        },
        {
          "doi": "10.1017/CBO9780511753695",
          "note": "Wolfram (2002) - A New Kind of Science: classification of CA complexity and universality"
        },
        {
          "doi": "10.1016/0024-3795(84)90085-6",
          "note": "Gardner (1970) - Conway's Game of Life and glider universality"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/computer-science-mathematics/b-cellular-automata-computational-universality.yaml"
    },
    {
      "id": "b-complexity-phase-transitions",
      "title": "Computational complexity and phase transitions — NP-hard problem hardness exhibits thermodynamic-like phase transitions governed by the same statistical physics of disordered systems",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Many NP-complete problems (3-SAT, graph coloring, random k-SAT, traveling salesman) exhibit sharp phase transitions in their typical-case hardness as a control parameter varies. In random k-SAT: let α = clauses/variables. Below α_c (SAT phase): almost all instances are satisfiable, easily solved by ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-rsg-transition-separates-polynomial-exponential-regimes"
      ],
      "communication_gap": "Theoretical computer scientists and statistical physicists have largely separate literature cultures despite deep mathematical connections. The Mézard et al. (2002) Science paper was a landmark bridge publication. Theoretical CS tends to focus on worst-case complexity; statistical physics on typical-case behavior. The cavity method has yet to be fully absorbed into the CS algorithms curriculum. The P vs NP problem is framed as a question about worst-case complexity, while physics approaches address typical-case hardness — different but related questions.\n",
      "translation_table": [
        {
          "field_a_term": "clause-to-variable ratio α",
          "field_b_term": "inverse temperature β (or disorder parameter)",
          "note": "α plays the role of the control parameter that drives the phase transition"
        },
        {
          "field_a_term": "satisfying assignment (solution)",
          "field_b_term": "low-energy configuration in spin system",
          "note": "Finding a solution = finding a ground state; random walk = thermal fluctuation"
        },
        {
          "field_a_term": "UNSAT phase (no solutions)",
          "field_b_term": "ordered phase (no ground state entropy)",
          "note": "Above α_c: problem almost surely has no solution; ordered phase has unique ground state"
        },
        {
          "field_a_term": "clustering of solutions (solution space fragmentation)",
          "field_b_term": "replica symmetry breaking (spin glass phase)",
          "note": "Solutions cluster into exponentially many well-separated basins — makes search hard"
        },
        {
          "field_a_term": "survey propagation algorithm",
          "field_b_term": "belief propagation on factor graph (message passing)",
          "note": "Derived from cavity method; works at RSB level; solves instances DPLL cannot"
        },
        {
          "field_a_term": "backtracking plateau in DPLL solver",
          "field_b_term": "metastable trapping in spin glass landscape",
          "note": "Both reflect the same barrier structure in the solution space"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1073287",
          "note": "Mézard et al. (2002) Science 297:812 — survey propagation and the cavity method for random k-SAT"
        },
        {
          "doi": "10.1038/22055",
          "note": "Monasson et al. (1999) Nature 400:133 — determining computational complexity from phase transitions"
        },
        {
          "note": "Cook (1971) STOC — original NP-completeness paper for SAT"
        },
        {
          "doi": "10.1073/pnas.0703685104",
          "note": "Krzakala et al. (2007) PNAS 104:10318 — clustering and survey propagation"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/computer-science-mathematics/b-complexity-phase-transitions.yaml"
    },
    {
      "id": "b-computational-irreducibility-wolfram-rule110",
      "title": "Wolfram's computational irreducibility principle states that the only way to determine the future state of certain simple computational systems (notably Rule 110 cellular automata, which is Turing-complete) is to run them step by step - no shortcut exists - connecting the halting problem in computability theory to the limits of mathematical prediction in physical and complex systems.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "contested",
      "bridge_claim": "Rule 110 is a one-dimensional cellular automaton (1D CA) with 2 states and a specific local rule. Cook (2004) proved it is Turing-complete: it can simulate any Turing machine. This means no algorithm can predict its state at time T faster than running T steps of the CA - it is computationally irredu",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Computer scientists focus on formal computational complexity bounds (P, NP, PSPACE) while Wolfram's framework uses informal language that is contested in complexity theory; mathematicians studying cellular automata prove formal results (universality, undecidability) that are rarely communicated to the broad scientific community as limits on physical prediction.\n",
      "translation_table": [
        {
          "field_a_term": "Turing completeness (computer science)",
          "field_b_term": "Rule 110 can simulate any computation (mathematics)",
          "note": "Cook (2004) proved Rule 110 Turing-complete; key step in computational irreducibility argument"
        },
        {
          "field_a_term": "halting problem undecidability (computer science)",
          "field_b_term": "no algorithm decides Rule 110 future state faster than simulation (mathematics)",
          "note": "Undecidability of halting -> no shortcut for computationally irreducible systems"
        },
        {
          "field_a_term": "computational complexity class P vs. EXPTIME (computer science)",
          "field_b_term": "difference between predictable vs. irreducible physical systems (mathematics)",
          "note": "Computationally reducible systems have poly-time predictions; irreducible require exp-time simulation"
        },
        {
          "field_a_term": "computational equivalence principle (computer science)",
          "field_b_term": "claim that all sufficiently complex systems are equivalent in computational power (mathematics)",
          "note": "Wolfram's most contested claim; not proven; relies on universality of Rule 110 and similar CAs"
        }
      ],
      "references": [
        {
          "doi": "10.3138/9781442637085",
          "note": "Wolfram (2002) - A New Kind of Science; computational irreducibility and Rule 110"
        },
        {
          "doi": "10.1007/978-3-642-55385-4_1",
          "note": "Cook (2004) - universality in elementary cellular automata; Rule 110 Turing completeness proof"
        },
        {
          "doi": "10.1145/3321486",
          "note": "Aaronson (2020) - computational complexity: a modern approach (selected chapters)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/computer-science-mathematics/b-computational-irreducibility-wolfram-rule110.yaml"
    },
    {
      "id": "b-deep-equilibrium-models-x-fixed-point-iteration",
      "title": "Deep equilibrium networks (DEQs) define implicit layers by finding z* such that z* = f_θ(z*; x) — training uses implicit differentiation rooted in fixed-point / monotonic operator theory — connecting modern implicit deep learning to classical numerical analysis of Banach iterations, Anderson acceleration, and Jacobian-based sensitivity formulas.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Forward inference solves z = f(z) via root-finding or fixed-point iteration; reverse-mode derivatives apply the implicit function theorem (I − J)^{-1} structure analogous to adjoint sensitivity analysis for equilibrium constraints — practical DEQs trade finite-depth unrolls for convergence diagnosti",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-anderson-acceleration-deq-forward-steps-correlate-with-val-loss"
      ],
      "communication_gap": "Deep learning courses spotlight autograd through layers while numerical PDE courses teach Newton–Krylov schemes — implicit neural papers deliberately reunify these traditions yet textbooks rarely cross-list prerequisites.\n",
      "translation_table": [
        {
          "field_a_term": "fixed-point equation z = f_θ(z; x)",
          "field_b_term": "nonlinear residual F(z)=0 formulations in numerical analysis",
          "note": "Equivalent root-finding viewpoints with different solver emphasis."
        },
        {
          "field_a_term": "implicit differentiation via adjoint linear solve involving Jacobian inverse",
          "field_b_term": "sensitivity analysis of steady-state constrained dynamical systems",
          "note": "Same linear algebra backbone as equilibrium continuation methods at linear algebra level."
        },
        {
          "field_a_term": "solver tolerance ε controlling DEQ forward convergence",
          "field_b_term": "termination criteria in damped fixed-point / Newton-Krylov solvers",
          "note": "Shared engineering concerns about backward stability versus gradient bias."
        }
      ],
      "references": [
        {
          "doi": "10.48550/arXiv.1909.01377",
          "note": "Bai, Kolter & Koltun (2019) — deep equilibrium models (ICLR)"
        },
        {
          "doi": "10.1137/1.9781611970944",
          "note": "Kelley (1995) — iterative methods for linear and nonlinear equations (fixed-point foundation)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/computer-science-mathematics/b-deep-equilibrium-models-x-fixed-point-iteration.yaml"
    },
    {
      "id": "b-legal-argumentation-formal-logic",
      "title": "Legal reasoning can be formalized as abstract argumentation frameworks where arguments and their defeat relations determine the set of legally justified conclusions via extension semantics",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Dung's abstract argumentation framework AF = (AR, attacks) maps legal arguments to nodes and legal rebuttals/undercutters to directed edges, with grounded, preferred, and stable extension semantics providing formal definitions of which legal conclusions are justified, contested, or defeated under di",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Legal scholars analyze arguments qualitatively using doctrine while computer scientists develop formal argumentation systems; the operationalization of legal concepts (burden of proof, standards of evidence) into formal semantics remains contested between the two communities.",
      "translation_table": [
        {
          "field_a_term": "legal argument (claim with supporting reasons)",
          "field_b_term": "node in Dung's argumentation framework",
          "note": "Arguments are abstract; their internal structure (ASPIC+, Carneades) can be separately formalized"
        },
        {
          "field_a_term": "legal rebuttal or undercutter",
          "field_b_term": "directed attack edge A -> B in (AR, attacks)",
          "note": "A attacks B means A, if accepted, defeats B; rebuttal attacks conclusion, undercutter attacks inference rule"
        },
        {
          "field_a_term": "legal justification under preponderance of evidence",
          "field_b_term": "grounded extension: the unique minimal complete extension",
          "note": "Grounded extension is skeptical: only accept what cannot be defeated; maps to beyond-reasonable-doubt standard"
        },
        {
          "field_a_term": "legal justification under balance of probabilities",
          "field_b_term": "preferred extension: maximal admissible set",
          "note": "Preferred extensions are credulous: accept as much as possible without inconsistency"
        },
        {
          "field_a_term": "burden of proof allocation",
          "field_b_term": "asymmetric attack strength or priority ordering on arguments",
          "note": "ASPIC+ formalizes burden allocation through argument ordering; rebuttals only succeed against equal or weaker arguments"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0004-3702(95)00021-9",
          "note": "Dung (1995) - On the acceptability of arguments: foundational paper on abstract argumentation"
        },
        {
          "doi": "10.1007/s10506-006-9036-x",
          "note": "Prakken & Sartor (2006) - law and logic: a review from an argumentation perspective"
        },
        {
          "doi": "10.1016/j.artint.2010.11.009",
          "note": "Modgil & Prakken (2012) - ASPIC+ framework for structured argumentation"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/computer-science-mathematics/b-legal-argumentation-formal-logic.yaml"
    },
    {
      "id": "b-ml-generalization-pac-learning-vc-dimension",
      "title": "Machine learning generalization — the ability of a model to perform well on unseen data — is formalized by PAC learning theory and bounded by the Vapnik-Chervonenkis (VC) dimension: a hypothesis class is PAC-learnable if and only if it has finite VC dimension, providing a mathematical foundation for why learning is or is not possible.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "PAC (Probably Approximately Correct) learning: a hypothesis class H is ε-δ PAC-learnable if for all ε,δ > 0 there exists a sample complexity m ≥ (1/ε)[ln|H| + ln(1/δ)] (finite H) such that with probability ≥ 1-δ, any ERM learner returns h with error ≤ ε. For infinite H, the VC dimension d_VC = VC(H)",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-implicit-regularization-sgd-explains-deep-learning-generalization"
      ],
      "communication_gap": "PAC learning theory was developed by Valiant (1984) and formalized by VC theory in the 1970s-80s; it became the dominant theoretical framework for ML generalization, but the success of deep learning has exposed fundamental gaps between VC-dimension predictions and empirical behavior, creating an active bridge problem between theoretical computer science, statistics, and deep learning practitioners.\n",
      "translation_table": [
        {
          "field_a_term": "generalization error (machine learning)",
          "field_b_term": "true risk R(h) minus empirical risk R_emp(h) (statistical learning theory)",
          "note": "Generalization gap is bounded by VC/Rademacher complexity via uniform convergence theorems"
        },
        {
          "field_a_term": "overfitting (machine learning)",
          "field_b_term": "high variance / low bias regime in bias-variance tradeoff (mathematics)",
          "note": "VC theory quantifies when empirical risk is a poor proxy for true risk"
        },
        {
          "field_a_term": "model complexity (machine learning)",
          "field_b_term": "VC dimension d_VC or Rademacher complexity (mathematics)",
          "note": "VC dimension is the formal complexity measure determining the PAC sample complexity"
        },
        {
          "field_a_term": "train/test split (machine learning)",
          "field_b_term": "empirical vs true risk estimation (statistics)",
          "note": "Test set provides unbiased estimate of true risk; PAC theory bounds the required test size"
        }
      ],
      "references": [
        {
          "doi": "10.1145/1968.1972",
          "note": "Valiant (1984) - A theory of the learnable (original PAC paper)"
        },
        {
          "doi": "10.1109/TIT.1971.1054643",
          "note": "Vapnik & Chervonenkis (1971) - on the uniform convergence of relative frequencies (VC theory)"
        },
        {
          "doi": "10.1126/science.1238411",
          "note": "Shalev-Shwartz & Ben-David (2014) - Understanding Machine Learning (textbook reference)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/computer-science-mathematics/b-ml-generalization-pac-learning-vc-dimension.yaml"
    },
    {
      "id": "b-number-field-sieve-cryptographic-hardness",
      "title": "The number field sieve (NFS) algorithm achieves sub-exponential complexity L_n[1/3, c] = exp((c+o(1)) * (ln n)^{1/3} * (ln ln n)^{2/3}) for integer factorization, establishing the precise complexity-theoretic boundary on RSA and discrete logarithm hardness that makes modern public-key cryptography quantifiably secure against classical computation while simultaneously defining the cryptanalytic target for quantum speedup",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The NFS algorithm for factoring n applies algebraic number theory (number fields with rings of integers, ideal factorization in class groups) to the combinatorial sieve: it finds pairs (a,b) such that F(a,b) and G(a,b) are both B-smooth (have all prime factors < B), then uses Gaussian elimination ov",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-nfs-rsa-concrete-security-boundary"
      ],
      "communication_gap": "Number theorists develop and analyze the NFS mathematically while cryptographic standards bodies translate complexity results into concrete key-size recommendations; the gap between theoretical L_n[1/3,c] asymptotics and the actual practical record factorizations that drive NIST key-size recommendations is not always transparent to either community.",
      "translation_table": [
        {
          "field_a_term": "smooth number (all prime factors < B) in NFS sieve (mathematics)",
          "field_b_term": "B-smooth integer in the sieving step of integer factorization (computer science)",
          "note": "Smoothness probability psi(x,B)/x ~ rho(u) (u = log x / log B) from analytic number theory sets the algorithm cost"
        },
        {
          "field_a_term": "number field and ring of integers O_K in NFS (mathematics)",
          "field_b_term": "algebraic structure providing two different factorization maps to Z (mathematics)",
          "note": "Two polynomial maps from Z to smooth factorizations; algebraic number field provides the second map enabling square root step"
        },
        {
          "field_a_term": "RSA modulus n = p*q (cryptography)",
          "field_b_term": "integer factorization problem input (mathematics)",
          "note": "Security of RSA is equivalent to hardness of factoring n; NFS cost gives the concrete bit security as a function of |n|"
        },
        {
          "field_a_term": "lattice sieving in the NFS factor base (mathematics)",
          "field_b_term": "lattice-based combinatorial optimization over Z^2 (mathematics)",
          "note": "Modern NFS uses lattice sieving (Lenstra) to enumerate (a,b) pairs efficiently; same lattice geometry as LLL reduction"
        }
      ],
      "references": [
        {
          "doi": "10.1007/3-540-49264-X_1",
          "note": "Lenstra & Lenstra (1993) - development of the number field sieve (foundational NFS reference)"
        },
        {
          "doi": "10.1007/s00145-002-0148-6",
          "note": "Coppersmith (1993) - modifications to the number field sieve (NFS complexity analysis)"
        },
        {
          "doi": "10.1145/3386134",
          "note": "Bardet et al. (2020) - record discrete logarithm and factorization; NFS benchmark"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/computer-science-mathematics/b-number-field-sieve-cryptographic-hardness.yaml"
    },
    {
      "id": "b-randomized-algorithms-probabilistic-method",
      "title": "The probabilistic method (Erdős) proves combinatorial existence by showing random objects have a desired property with positive probability; randomized algorithms exploit this computationally, and derandomization bridges the two via conditional expectations, unifying combinatorics and algorithm design.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The probabilistic method (Erdős 1947): to prove that a combinatorial object with property P exists, construct a suitable probability space, show the random object lacks property P with probability < 1. No explicit construction needed — existence follows from probability. This is a pure mathematical ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-jl-lemma-optimal-dimension-reduction"
      ],
      "communication_gap": "Erdős worked primarily in combinatorics; the algorithmic implications were developed by the theory of computing community (Rabin, Solovay, Strassen, Motwani) beginning in the 1970s. The unification through derandomization (Nisan, Wigderson, Alon) is a major achievement of theoretical computer science. Applied machine learning practitioners routinely use Johnson–Lindenstrauss projections and randomized SVD without knowing the combinatorial probabilistic method foundations.\n",
      "translation_table": [
        {
          "field_a_term": "probabilistic existence proof (Pr[P holds] > 0)",
          "field_b_term": "expected-case guarantee in randomized algorithm",
          "note": "both rely on random variable analysis over the same probability space"
        },
        {
          "field_a_term": "Lovász Local Lemma (sparse bad events can all be avoided)",
          "field_b_term": "Moser–Tardos constructive proof (algorithmic LLL gives polynomial construction)"
        },
        {
          "field_a_term": "Chernoff bound (exponential concentration of sums)",
          "field_b_term": "high-probability guarantee in randomized rounding algorithms"
        },
        {
          "field_a_term": "second moment method (Var[X] ≤ E[X²])",
          "field_b_term": "analysis of collision probability in hashing algorithms"
        },
        {
          "field_a_term": "random graph G(n,p) (Erdős–Rényi)",
          "field_b_term": "average-case model for network algorithm analysis"
        },
        {
          "field_a_term": "method of conditional expectations",
          "field_b_term": "derandomization — turning probabilistic proof into deterministic algorithm"
        },
        {
          "field_a_term": "Johnson–Lindenstrauss lemma (dimension reduction)",
          "field_b_term": "random projection preprocessing in machine learning pipelines"
        }
      ],
      "references": [
        {
          "note": "Alon & Spencer (2016) The Probabilistic Method, 4th ed. Wiley"
        },
        {
          "doi": "10.1090/conm/026/737400",
          "note": "Johnson & Lindenstrauss (1984) Extensions of Lipschitz maps into a Hilbert space. Contemp Math 26:189"
        },
        {
          "note": "Motwani & Raghavan (1995) Randomized Algorithms. Cambridge University Press"
        },
        {
          "doi": "10.1007/BF02579385",
          "note": "Raghavan & Thompson (1987) Randomized rounding. Combinatorica 7:365"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/computer-science-mathematics/b-randomized-algorithms-probabilistic-method.yaml"
    },
    {
      "id": "b-sat-phase-transition",
      "title": "Random 3-SAT undergoes a sharp satisfiability phase transition at clause-to-variable ratio α ≈ 4.267 — the computational hardness peak maps onto a spin-glass phase transition (replica-symmetry breaking), linking P vs. NP to the statistical physics of disordered systems.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "A random 3-SAT instance with n variables and m = αn clauses (each clause containing 3 random variables in random polarity) undergoes a sharp phase transition at critical ratio α_c ≈ 4.267 (Kirkpatrick & Selman 1994; rigorous bounds by Mézard et al. 2002): - α < α_c: instance is satisfiable with prob",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-spin-glass-p-np-separation"
      ],
      "communication_gap": "Computer scientists studying algorithm complexity and statistical physicists studying disordered systems independently discovered similar phase transitions in the 1990s. The communities have converged somewhat (the survey propagation work was a landmark collaboration), but the formal connection between computational complexity classes and equilibrium statistical mechanics phase diagrams remains incomplete. The P≠NP question is studied as pure mathematics in complexity theory but as a physics problem in the statistical mechanics of computation community — rare cross-pollination exists between STOC/FOCS proceedings and Physical Review Letters.\n",
      "translation_table": [
        {
          "field_a_term": "Boolean variable x_i",
          "field_b_term": "Ising spin σ_i ∈ {-1, +1}"
        },
        {
          "field_a_term": "3-SAT clause (disjunction of 3 literals)",
          "field_b_term": "3-spin interaction term in random energy model"
        },
        {
          "field_a_term": "clause-to-variable ratio α = m/n",
          "field_b_term": "inverse temperature / coupling strength in spin glass"
        },
        {
          "field_a_term": "satisfiable instance (α < α_c)",
          "field_b_term": "paramagnetic phase (replica-symmetric, single Gibbs state)"
        },
        {
          "field_a_term": "unsatisfiable instance (α > α_c)",
          "field_b_term": "frustrated phase (no zero-energy ground state)"
        },
        {
          "field_a_term": "hardest instances at α ≈ α_c",
          "field_b_term": "replica-symmetry breaking / spin-glass phase"
        },
        {
          "field_a_term": "exponential solver runtime at phase transition",
          "field_b_term": "exponential ground-state search in spin glass (no polynomial algorithm known)"
        },
        {
          "field_a_term": "DPLL backtracking algorithm",
          "field_b_term": "simulated annealing in energy landscape"
        },
        {
          "field_a_term": "survey propagation algorithm",
          "field_b_term": "cavity method / belief propagation from statistical physics"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.264.5163.1297",
          "note": "Kirkpatrick & Selman (1994) Science 264:1297 — discovery of SAT phase transition"
        },
        {
          "doi": "10.1126/science.297.5582.812",
          "note": "Mézard, Parisi & Zecchina (2002) Science 297:812 — survey propagation / cavity method"
        },
        {
          "note": "Cook (1971) STOC — NP-completeness of SAT"
        },
        {
          "doi": "10.1038/22055",
          "note": "Monasson et al. (1999) Nature 400:133 — determining computational complexity of 3-SAT"
        },
        {
          "doi": "10.1073/pnas.97.2.792",
          "note": "Mézard & Montanari (2009) — information, physics, computation (book)"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/computer-science-mathematics/b-sat-phase-transition.yaml"
    },
    {
      "id": "b-type-theory-logic-curry-howard",
      "title": "The Curry-Howard correspondence establishes propositions-as-types, proofs-as-programs — making every mathematical proof a computer program and every type-checking computation a proof verification",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Curry-Howard correspondence (Curry 1934, Howard 1980) reveals a deep structural identity between formal logic and type theory in programming languages: propositions correspond to types, proofs correspond to programs, and logical connectives correspond to type constructors. Specifically: conjunct",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-homotopy-type-theory-completes-curry-howard-for-homotopy-theory"
      ],
      "communication_gap": "Mathematicians writing proofs in natural language and programmers writing code rarely interact. The correspondence is well-known to programming language theorists but under-appreciated in mainstream mathematics and software engineering. Educational silos mean most CS students learn types without logic and most math students learn logic without types.\n",
      "translation_table": [
        {
          "field_a_term": "proposition P",
          "field_b_term": "type T",
          "note": "A proposition is identified with the type of its proofs"
        },
        {
          "field_a_term": "proof of P",
          "field_b_term": "term of type T (program)",
          "note": "A proof is a construction — literally a program of the corresponding type"
        },
        {
          "field_a_term": "logical implication A→B",
          "field_b_term": "function type A→B",
          "note": "Modus ponens = function application; a proof of A→B applied to a proof of A yields a proof of B"
        },
        {
          "field_a_term": "universal quantification ∀x.P(x)",
          "field_b_term": "dependent product type Π(x:A).P(x)",
          "note": "Quantification over individuals corresponds to parametric polymorphism"
        },
        {
          "field_a_term": "cut elimination (normalisation)",
          "field_b_term": "beta reduction (program execution)",
          "note": "Proof simplification corresponds exactly to program evaluation"
        }
      ],
      "references": [
        {
          "note": "Curry (1934) — functionality in combinatory logic, first observation"
        },
        {
          "note": "Howard (1980) — formulae-as-types notion of construction"
        },
        {
          "note": "Martin-Löf (1984) Intuitionistic Type Theory — dependent type theory"
        },
        {
          "note": "Wadler (2015) — propositions as types (historical survey)",
          "doi": "10.1145/2699407"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/computer-science-mathematics/b-type-theory-logic-curry-howard.yaml"
    },
    {
      "id": "b-softmax-attention-x-cortical-divisive-normalization",
      "title": "Transformer softmax attention maps token compatibilities through exponentiated scores normalized across keys — paralleling neural models of cortical normalization and gain control where responses are divided by pooled activity to sharpen stimulus contrast and implement competitive dynamics across a neuronal population.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Attention weights are a_ij = softmax_j(q_i · k_j / √d): nonnegative, sum-to-one over j for fixed i, resembling a divisive normalization across locations/channels after an expansive nonlinearity (exp). Canonical cortical normalization (Carandini–Heeger style) divides each unit response by a weighted ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-softmax-attention-x-cortical-divisive-normalization"
      ],
      "communication_gap": "Deep learning papers rarely cite canonical neuroscience normalization literature; neuroscience papers on transformers often rely on metaphor rather than quantitative mapping between softmax logits and membrane currents.\n",
      "translation_table": [
        {
          "field_a_term": "Softmax over attention logits (exp then normalize)",
          "field_b_term": "Divisive normalization denominator (surround sum + σ)",
          "note": "Both convert raw comparisons into relative gains bounded by a partition function–like denominator."
        },
        {
          "field_a_term": "Temperature / √d scaling in attention",
          "field_b_term": "Gain or noise parameter controlling sharpness of competition in normalization models",
          "note": "Larger temperature softens attention peaks analogously to weaker surround inhibition broadening tuning."
        },
        {
          "field_a_term": "Multi-head attention channels",
          "field_b_term": "Parallel feature maps or cortical columns with distinct normalization pools",
          "note": "Heads specialize filters; cortical layers parallel multiple gain-control pathways with different pooling footprints."
        }
      ],
      "references": [
        {
          "doi": "10.1038/371521a0",
          "note": "Carandini & Heeger (1994) Nature — contrast adaptation and cortical gain control"
        },
        {
          "doi": "10.48550/arXiv.1706.03762",
          "note": "Vaswani et al. (2017) NeurIPS — Attention Is All You Need (transformer attention)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/computer-science-neuroscience/b-softmax-attention-x-cortical-divisive-normalization.yaml"
    },
    {
      "id": "b-transformer-attention-neural-attention",
      "title": "The transformer's scaled dot-product attention mechanism is a computational formalisation of neural attention theories from cognitive neuroscience — scaled dot-product Q·Kᵀ/√d_k implements a soft winner-take-all competition analogous to cortical inhibitory circuits, while self-attention corresponds to lateral inhibition combined with top-down modulatory feedback.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The transformer attention mechanism (Vaswani et al. 2017):\n\n  Attention(Q, K, V) = softmax(QKᵀ / √d_k) V\n\noperates on queries Q, keys K, and values V. Each output position attends to all input positions with a weight proportional to the query-key similarity divided by √d_k. This structure maps onto ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-transformer-neural-attention-alignment"
      ],
      "communication_gap": "The transformer architecture (Vaswani et al. 2017) was developed by machine learning engineers without reference to the neural attention literature (Treisman, Desimone & Duncan, Moran & Desimone 1985). The parallel was noted by Lindsay (2020) in a review article but has not yet led to productive bidirectional exchange. Neuroscientists and ML researchers publish in separate venues (NeurIPS vs. Nature Neuroscience), use incompatible terminology, and have different evaluation criteria (benchmark performance vs. neural data fit). Joint workshops and comparative modelling challenges are needed to translate insights across the divide.\n",
      "translation_table": [
        {
          "field_a_term": "Query vector Q",
          "field_b_term": "Task-relevant template or top-down attentional template",
          "note": "In visual cortex: template matching is implemented by multiplicative gain modulation via top-down feedback"
        },
        {
          "field_a_term": "Key vector K",
          "field_b_term": "Stimulus feature representation in sensory areas",
          "note": "The dot product Q·K measures feature match, analogous to cross-correlation of template and stimulus"
        },
        {
          "field_a_term": "Softmax normalisation",
          "field_b_term": "Divisive normalisation / winner-take-all competition",
          "note": "Attention weights sum to 1; biologically implemented by inhibitory interneuron pools"
        },
        {
          "field_a_term": "Value vector V",
          "field_b_term": "Stimulus content to be routed/transmitted",
          "note": "Attended stimulus content is amplified and routed to higher areas; unattended is suppressed"
        },
        {
          "field_a_term": "Multi-head attention (h parallel heads)",
          "field_b_term": "Multiple parallel attentional streams (feature, spatial, temporal)",
          "note": "Each head = separate attentional spotlight on a different feature subspace"
        },
        {
          "field_a_term": "Self-attention (Q = K = V)",
          "field_b_term": "Lateral inhibition + contextual modulation within a cortical area",
          "note": "Every token/neuron modulated by every other; long-range in transformers, local in cortex"
        },
        {
          "field_a_term": "Attention head specialisation",
          "field_b_term": "Feature-selective cortical areas (V4 for colour, MT for motion)",
          "note": "Individual transformer heads show interpretable specialisation (syntactic, positional, semantic)"
        }
      ],
      "references": [
        {
          "arxiv": "1706.03762",
          "note": "Vaswani et al. (2017) NeurIPS — Attention is All You Need; transformer architecture"
        },
        {
          "doi": "10.1126/science.229.4717.782",
          "note": "Moran & Desimone (1985) Science 229:782 — selective attention gates visual processing in extrastriate cortex"
        },
        {
          "doi": "10.1016/0010-0285(80)90005-5",
          "note": "Treisman & Gelade (1980) Cogn Psychol 12:97 — feature integration theory of attention"
        },
        {
          "doi": "10.1016/j.cub.2019.11.015",
          "note": "Lindsay (2020) Curr Biol 30:R268 — attention in psychology and computational neuroscience"
        },
        {
          "doi": "10.1016/j.neuron.2001.09.018",
          "note": "Desimone & Duncan (1995, cited review) — neural mechanisms of selective visual attention; biased competition"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/computer-science-neuroscience/b-transformer-attention-neural-attention.yaml"
    },
    {
      "id": "b-combinatorial-optimization-spin-glass",
      "title": "Hard combinatorial optimization problems (k-SAT, graph coloring, TSP) exhibit phase transitions in solution difficulty that map precisely onto spin glass energy landscape topology, with the satisfiability threshold corresponding to the spin glass phase boundary\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Random k-SAT and related NP-hard combinatorial optimization problems undergo a sharp phase transition at a critical clause-to-variable ratio α_c where the fraction of satisfiable instances drops from ~1 to ~0: this transition corresponds to the replica-symmetry-breaking transition in a Ising spin gl",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-replica-symmetry-breaking-algorithm-hardness"
      ],
      "communication_gap": "Computer scientists study worst-case complexity classes while statistical physicists analyse average-case behavior of disordered systems; the cavity method / replica method from spin glass theory is underutilized in algorithm design despite providing exact predictions for average-case hardness.\n",
      "translation_table": [
        {
          "field_a_term": "satisfying assignment in k-SAT (computer science)",
          "field_b_term": "ground state configuration of Ising spin glass (statistical physics)",
          "note": "Each Boolean variable maps to an Ising spin; clauses map to plaquette interactions; solution = zero-energy ground state"
        },
        {
          "field_a_term": "clause-to-variable ratio α in random k-SAT (computer science)",
          "field_b_term": "inverse temperature × interaction strength controlling spin glass phases (statistical physics)",
          "note": "α controls the constraint density; α_c corresponds to the spin glass phase transition point"
        },
        {
          "field_a_term": "algorithmic hardness / exponential search time (computer science)",
          "field_b_term": "rough energy landscape with exponentially many metastable states separated by barriers (statistical physics)",
          "note": "Hard instances near α_c correspond to spin glass phase where simple annealing gets trapped in local minima"
        },
        {
          "field_a_term": "survey propagation algorithm (computer science)",
          "field_b_term": "belief propagation on a factor graph in the cavity method (statistical physics)",
          "note": "SP is derived from the 1-RSB cavity method of spin glass theory; it succeeds near threshold where DPLL fails"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1073287",
          "note": "Mézard et al. (2002) - analytic and algorithmic solution of random satisfiability problems via survey propagation"
        },
        {
          "doi": "10.1016/j.tcs.2006.08.023",
          "note": "Krzakala et al. (2007) - Gibbs states and the set of solutions of random constraint satisfaction problems"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/computer-science-physics/b-combinatorial-optimization-spin-glass.yaml"
    },
    {
      "id": "b-koopman-edmd-x-video-dynamics-linearization",
      "title": "Extended Dynamic Mode Decomposition approximates Koopman-invariant subspaces to linearize nonlinear dynamics, bridging dynamical systems theory with video sequence modeling and forecasting surrogates.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Established data-driven method (EDMD) approximates Koopman eigenfunctions from trajectory dictionaries; speculative analogy for video—learned linear evolution in lifted feature spaces may forecast short horizons when latent dynamics are approximately low-dimensional and stationary.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-data-driven-koopman-basis-improves-long-horizon-video-prediction"
      ],
      "communication_gap": "Computer vision emphasizes perceptual metrics while dynamical systems emphasizes spectral residual diagnostics rarely reported in ML venues.",
      "translation_table": [
        {
          "field_a_term": "observable dictionary",
          "field_b_term": "CNN / patch feature maps",
          "note": "Dictionary richness determines spectral bias."
        },
        {
          "field_a_term": "Koopman operator finite matrix approximation",
          "field_b_term": "linear recurrence in latent space",
          "note": "Closure error grows with horizon and complexity."
        },
        {
          "field_a_term": "spectral decomposition",
          "field_b_term": "mode-based motion templates",
          "note": "Interpretability varies with representation learning choices."
        }
      ],
      "references": [
        {
          "arxiv": "1312.5186",
          "note": "Extended Dynamic Mode Decomposition foundational methodology reference."
        }
      ],
      "last_reviewed": "2026-05-09",
      "file": "cross-domain/computer-science-physics/b-koopman-edmd-x-video-dynamics-linearization.yaml"
    },
    {
      "id": "b-quantum-complexity-circuit-depth",
      "title": "Random quantum circuits of sufficient depth produce probability distributions that are computationally hard to classically sample from, establishing a complexity-theoretic separation between quantum and classical computation that connects circuit depth theory to the physics of quantum chaos, entanglement growth, and decoherence.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Classical computational complexity: the class BPP (bounded-error probabilistic polynomial time) captures what classical computers can efficiently compute. BQP (bounded-error quantum polynomial time) adds quantum superposition and entanglement. It is widely believed that BQP ≠ BPP (quantum computers ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-random-circuit-sampling-classical-boundary-fidelity"
      ],
      "communication_gap": "Computational complexity theory (Arora & Barak, STOC/FOCS) and quantum physics (Phys Rev Lett, Nature Physics) developed independently for decades. Quantum computing theory (Shor 1994, Grover 1996) was done by computer scientists and physicists jointly but focused on algorithms, not circuit complexity. The specific connection between random circuit complexity and quantum chaos via OTOCs was made by Hayden & Preskill (2007) for black hole information and extended to circuits by Roberts & Yoshida (2017). Experimental quantum computing groups (Google, IBM) bridge these literatures in practice but the theoretical communities remain somewhat separate. Complexity theorists rarely attend quantum chaos conferences and vice versa.\n",
      "translation_table": [
        {
          "field_a_term": "quantum circuit of depth d on n qubits",
          "field_b_term": "quantum chaotic system evolved for time t = d (discrete time)"
        },
        {
          "field_a_term": "random 2-qubit gate (Haar-random unitary)",
          "field_b_term": "random time step in quantum chaotic dynamics"
        },
        {
          "field_a_term": "anti-concentration (Porter-Thomas distribution of output probabilities)",
          "field_b_term": "quantum ergodicity (eigenstate thermalisation hypothesis for circuits)"
        },
        {
          "field_a_term": "quantum volume V_Q = 2^min(n,d)",
          "field_b_term": "Hilbert space volume accessible to quantum chaotic dynamics"
        },
        {
          "field_a_term": "cross-entropy benchmarking (XEB) fidelity",
          "field_b_term": "overlap between quantum state and its ideal counterpart (state fidelity)"
        },
        {
          "field_a_term": "tensor network contraction (classical simulation)",
          "field_b_term": "area law of entanglement entropy (condition for efficient classical description)"
        },
        {
          "field_a_term": "quantum supremacy threshold (fidelity F*)",
          "field_b_term": "quantum chaos threshold (scrambling time t_scramble)"
        }
      ],
      "references": [
        {
          "note": "Nielsen & Chuang (2000) Quantum Computation and Quantum Information. Cambridge"
        },
        {
          "doi": "10.1038/s41567-018-0318-2",
          "note": "Boixo et al. (2018) Nat Phys 14:595 — quantum supremacy framework, XEB"
        },
        {
          "doi": "10.1038/s41586-019-1666-5",
          "note": "Arute et al. (2019) Nature 574:505 — Google quantum supremacy experiment"
        },
        {
          "doi": "10.1038/s41567-019-0483-4",
          "note": "Bouland et al. (2019) Nat Phys 15:159 — complexity-theoretic hardness of RCS"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/computer-science-physics/b-quantum-complexity-circuit-depth.yaml"
    },
    {
      "id": "b-quantum-supremacy-complexity",
      "title": "Google's Sycamore quantum processor (2019) demonstrated quantum computational advantage by sampling a random quantum circuit distribution in 200s vs estimated 10,000 classical years, framing the question of quantum advantage as the complexity separation BQP vs BPP and connecting quantum entanglement physics to computational complexity theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Google's 53-qubit Sycamore processor (Arute et al. 2019) sampled the output distribution of a pseudo-random quantum circuit in 200s, with classical simulation estimated at 10,000 years on Summit supercomputer. The complexity-theoretic interpretation: this demonstrates BQP is likely not contained in ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-quantum-error-correction-surface-code-overhead-v2"
      ],
      "communication_gap": "Complexity theorists and quantum physicists largely work in separate communities despite the quantum supremacy question requiring both. The complexity-theoretic framing (BQP vs BPP) is rarely explained in physics papers; the physics of noise and decoherence is rarely engaged in complexity theory papers. Preskill (2018) coined \"quantum supremacy\" specifically to bridge communities.\n",
      "translation_table": [
        {
          "field_a_term": "quantum entanglement (non-separable multi-qubit state)",
          "field_b_term": "computational resource enabling BQP separation from BPP",
          "note": "entanglement is the physical resource; complexity advantage is the computational consequence"
        },
        {
          "field_a_term": "quantum interference (amplitude cancellation in circuit)",
          "field_b_term": "hardness of classical simulation (exponential amplitude summation)",
          "note": "constructive/destructive interference is precisely what makes classical simulation hard"
        },
        {
          "field_a_term": "random quantum circuit sampling",
          "field_b_term": "anticoncentration (output distribution close to uniform, hard to spoof classically)",
          "note": "anticoncentration property ensures classical simulation requires exponential time"
        },
        {
          "field_a_term": "matrix permanent (BosonSampling)",
          "field_b_term": "#P-hard problem in computational complexity",
          "note": "computing Perm(A) for complex A is"
        }
      ],
      "references": [
        {
          "doi": "10.1038/s41586-019-1666-5",
          "note": "Arute et al. (2019) — Quantum supremacy using a programmable superconducting processor; Nature 574:505"
        },
        {
          "doi": "10.1145/1993636.1993682",
          "note": "Aaronson & Arkhipov (2011) — The computational complexity of linear optics; STOC"
        },
        {
          "doi": "10.22331/q-2018-08-06-79",
          "note": "Preskill (2018) — Quantum computing in the NISQ era and beyond; Quantum 2:79"
        },
        {
          "doi": "10.1126/science.abe8770",
          "note": "Zhong et al. (2020) — Quantum computational advantage using photons; Science 370:1460"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/computer-science-physics/b-quantum-supremacy-complexity.yaml"
    },
    {
      "id": "b-self-supervised-learning-x-statistical-mechanics",
      "title": "Contrastive self-supervised learning — pulling positive pairs together and pushing negatives apart — resembles learning energy-based and Boltzmann-machine style scores where temperature controls sharpness of discrimination.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Energy-based models assign low energy to plausible configurations; training shapes the energy landscape so that data lie in wells. Contrastive objectives such as InfoNCE reweight logits of positive versus negative pairs using a softmax with temperature parameter τ, producing a probabilistic discrimi",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-contrastive-loss-implements-high-temperature-energy-comparison"
      ],
      "communication_gap": "SSL papers discuss temperature heuristically; physics-flavored EBMs emphasize equilibrium. Cross notation is inconsistent, obscuring when τ controls optimization stability versus representation geometry.",
      "translation_table": [
        {
          "field_a_term": "Boltzmann factor exp(-E/kT)",
          "field_b_term": "softmax weights exp(sim/τ) in contrastive logits"
        },
        {
          "field_a_term": "partition function / normalization",
          "field_b_term": "sum over negatives in denominator"
        },
        {
          "field_a_term": "free energy minimization",
          "field_b_term": "contrastive loss minimization"
        }
      ],
      "references": [
        {
          "doi": "10.1162/neco.2002.14.8.1771",
          "note": "Hinton (2002) — training products of experts / contrastive divergence family"
        },
        {
          "arxiv": "2002.05709",
          "note": "Chen et al. (2020) — SimCLR contrastive learning framework"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/computer-science-physics/b-self-supervised-learning-x-statistical-mechanics.yaml"
    },
    {
      "id": "b-simulated-annealing-stat-mech",
      "title": "The simulated annealing metaheuristic (Kirkpatrick et al. 1983) is a direct algorithmic implementation of statistical-mechanical annealing: the Metropolis acceptance criterion mirrors the Boltzmann factor and the cooling schedule controls convergence to the configuration-space ground state.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Kirkpatrick et al. (1983) introduced simulated annealing by recognising that combinatorial optimization problems are formally equivalent to finding the ground state of a physical system. The acceptance probability P(accept) = exp(-ΔE/T) in the algorithm is precisely the Boltzmann factor from statist",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-simulated-annealing-quantum-speedup"
      ],
      "communication_gap": "The 1983 Kirkpatrick paper appeared in Science and was read by computer scientists who adopted the algorithm without internalizing its statistical-mechanical foundations. Meanwhile, physicists who understand the Boltzmann distribution intimately rarely connect it to combinatorial optimization. The Metropolis (1953) paper is among the most-cited in computational physics yet is rarely cited in the operations research and algorithm design literature where simulated annealing is taught.\n",
      "translation_table": [
        {
          "field_a_term": "cost function E(x)",
          "field_b_term": "potential energy of a configuration",
          "note": "minimizing cost = finding ground state"
        },
        {
          "field_a_term": "acceptance probability exp(-ΔE/T)",
          "field_b_term": "Boltzmann factor for thermal fluctuations",
          "note": "identical mathematical expression in both fields"
        },
        {
          "field_a_term": "computational temperature T",
          "field_b_term": "thermal energy kT of the physical system",
          "note": "controls the width of the Boltzmann distribution over solutions"
        },
        {
          "field_a_term": "cooling schedule T(t) → 0",
          "field_b_term": "physical annealing (slow cooling from melt)",
          "note": "logarithmic schedule guarantees convergence; faster schedules → local optima"
        },
        {
          "field_a_term": "global optimum of cost function",
          "field_b_term": "ground state (minimum energy configuration)"
        },
        {
          "field_a_term": "neighborhood of a solution (local moves)",
          "field_b_term": "configuration space accessible by thermal fluctuations"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.220.4598.671",
          "note": "Kirkpatrick et al. (1983) — Optimization by simulated annealing; Science 220:671"
        },
        {
          "doi": "10.1063/1.1699112",
          "note": "Metropolis et al. (1953) — Equation of state calculations by fast computing machines; J Chem Phys 21:1087"
        },
        {
          "note": "Černý (1985) — Thermodynamical approach to the traveling salesman problem; J Optim Theory Appl 45:41"
        },
        {
          "note": "van Laarhoven & Aarts (1987) — Simulated Annealing: Theory and Applications; Kluwer"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/computer-science-physics/b-simulated-annealing-stat-mech.yaml"
    },
    {
      "id": "b-pac-learning-generalization",
      "title": "PAC learning theory ↔ statistical generalisation — VC dimension as the degrees of freedom of a hypothesis class",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "PAC (Probably Approximately Correct) learning theory (Valiant 1984) provides a mathematical framework for when a learning algorithm can generalise from training data to unseen examples. A concept class C is PAC-learnable if for any accuracy ε and confidence δ there exists a polynomial-time algorithm",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-deep-learning-implicit-regularisation-as-minimum-description-length"
      ],
      "communication_gap": "Valiant (computer scientist) and Vapnik-Chervonenkis (statisticians) worked completely independently — Valiant's 1984 paper appeared in Comm ACM (computer science) while VC theory was in Soviet statistics journals. The formal equivalence between VC theory and classical statistics (AIC, BIC) is rarely taught in either machine learning or statistics courses. Statistical physics approaches to learning theory (Gardner, Derrida, Zdeborová) are published in physics journals mostly unknown to machine learning researchers despite solving problems the ML community still treats as open.\n",
      "translation_table": [
        {
          "field_a_term": "VC dimension d_VC",
          "field_b_term": "effective number of parameters / model complexity",
          "note": "d_VC bounds overfitting exactly as AIC penalises free parameters in classical statistics"
        },
        {
          "field_a_term": "PAC sample complexity m = O(d_VC/ε)",
          "field_b_term": "statistical sample size for consistent estimation",
          "note": "Both quantify how much data is needed to reliably estimate a model of given complexity"
        },
        {
          "field_a_term": "shattering (perfectly classifying all 2ⁿ labellings of n points)",
          "field_b_term": "interpolation / memorisation of training data",
          "note": "H shatters n points → H can overfit n points perfectly; analogous to exact interpolation"
        },
        {
          "field_a_term": "Rademacher complexity",
          "field_b_term": "expected maximum correlation with random noise",
          "note": "Tighter modern complexity measure; analogous to Fisher information in statistics"
        },
        {
          "field_a_term": "bias-variance tradeoff (underfitting vs overfitting)",
          "field_b_term": "model selection tradeoff (too simple vs too complex)",
          "note": "Identical concept in machine learning and classical statistics; PAC quantifies it exactly"
        },
        {
          "field_a_term": "double descent phenomenon (test error dips again past interpolation threshold)",
          "field_b_term": "phase transition in statistical physics (disorder-order transition)",
          "note": "Modern DNN behaviour: above d_VC threshold, test error decreases again — a phase transition"
        }
      ],
      "references": [
        {
          "doi": "10.1145/1968.1972",
          "note": "Valiant (1984) Commun ACM 27:1134 — PAC learning framework"
        },
        {
          "note": "Vapnik & Chervonenkis (1971) Theor Prob Appl 16:264 — VC dimension",
          "doi": "10.1137/1116025"
        },
        {
          "doi": "10.1145/76359.76371",
          "note": "Blumer et al. (1989) J ACM 36:929 — VC dimension and PAC learnability equivalence"
        },
        {
          "note": "Shalev-Shwartz & Ben-David (2014) Understanding Machine Learning. Cambridge University Press.",
          "url": "https://www.cs.huji.ac.il/~shais/UnderstandingMachineLearning/"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/computer-science-statistics/b-pac-learning-generalization.yaml"
    },
    {
      "id": "b-replica-exchange-tempering-x-bayesian-neural-posteriors",
      "title": "Replica-exchange tempering bridges molecular-simulation sampling and multimodal Bayesian neural posterior exploration.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Parallel tempering mitigates trapping in rugged posterior landscapes by swapping chains across temperature levels. The method is established in molecular simulation and increasingly relevant for Bayesian deep models with multimodal uncertainty.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-adaptive-temperature-ladders-improve-posterior-mixing"
      ],
      "communication_gap": "Adjacent communities use different software stacks and validation norms, so mathematically equivalent tools are often rediscovered in parallel.",
      "translation_table": [
        {
          "field_a_term": "Temperature ladder",
          "field_b_term": "Posterior smoothing schedule",
          "note": "High temperatures flatten local barriers."
        },
        {
          "field_a_term": "Replica swap acceptance",
          "field_b_term": "Cross-chain state exchange",
          "note": "Improves exploration without discarding detailed balance."
        },
        {
          "field_a_term": "Autocorrelation time",
          "field_b_term": "Posterior mixing efficiency",
          "note": "Primary diagnostic for usable effective sample size."
        }
      ],
      "references": [
        {
          "doi": "10.1098/rsta.1922.0009",
          "note": "Fisher (1922) estimation and information."
        },
        {
          "doi": "10.1017/S0962492910000061",
          "note": "Stuart (2010) Bayesian inverse-problem foundations."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/computer-science-statistics/b-replica-exchange-tempering-x-bayesian-neural-posteriors.yaml"
    },
    {
      "id": "b-ridge-regression-x-shrinkage",
      "title": "Ridge regression — L2 penalized least squares — is the maximum a posteriori estimator under a Gaussian prior on weights, linking frequentist shrinkage to Bayesian regularization.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Ordinary least squares minimizes squared error; adding an L2 penalty pulls coefficients toward zero, stabilizing ill-conditioned designs by trading bias for variance. Equivalently, with Gaussian likelihood and independent Gaussian priors on coefficients, the MAP estimate solves the ridge objective w",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ridge-penalty-matches-bayesian-width-in-neural-decoding"
      ],
      "communication_gap": "Statistics curricula teach ridge as penalization; some ML courses omit the Gaussian prior story, hindering uncertainty quantification links.",
      "translation_table": [
        {
          "field_a_term": "penalty λ",
          "field_b_term": "prior precision / noise ratio in Gaussian MAP"
        },
        {
          "field_a_term": "ridge solution (XᵀX + λI)⁻¹ Xᵀ y",
          "field_b_term": "posterior mean under conjugate normal-normal model"
        },
        {
          "field_a_term": "effective degrees of freedom",
          "field_b_term": "Bayesian model complexity measure"
        }
      ],
      "references": [
        {
          "doi": "10.1080/00401706.1970.10488634",
          "note": "Hoerl & Kennard (1970) — ridge regression foundations"
        },
        {
          "doi": "10.1007/978-0-387-84858-7_7",
          "note": "Murphy (2012) — probabilistic interpretation of ridge/MAP (handbook chapter DOI cluster)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/computer-science-statistics/b-ridge-regression-x-shrinkage.yaml"
    },
    {
      "id": "b-compressed-sensing-x-accelerated-mri-protocol-design",
      "title": "Compressed-sensing theory connects sparse recovery guarantees to accelerated MRI protocol design.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Restricted-measurement sparse recovery theory can guide MRI acquisition schedules that preserve clinically relevant structure at lower scan times.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-adaptive-kspace-schedules-preserve-diagnostic-mri-quality-at-higher-acceleration"
      ],
      "communication_gap": "Signal-processing benchmarks focus on reconstruction error, while clinical radiology prioritizes diagnosis-level robustness and workflow constraints.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1002/mrm.21391",
          "note": "Sparse MRI with compressed sensing."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/computer-vision-radiology/b-compressed-sensing-x-accelerated-mri-protocol-design.yaml"
    },
    {
      "id": "b-graph-cut-energy-minimization-x-radiology-lesion-segmentation-qc",
      "title": "Graph-cut energy diagnostics can transfer from computer vision optimization to radiology segmentation quality control.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Graph-cut energy diagnostics can transfer from computer vision optimization to radiology segmentation quality control.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-graph-cut-energy-residuals-detect-lesion-segmentation-failure-modes-earlier"
      ],
      "communication_gap": "Domain-specific vocabularies and benchmark conventions obscure transferable mathematical structure.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1038/nn.2733",
          "note": "Clinical signal-inference setting motivating robust model-based QC metrics."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/computer-vision-radiology/b-graph-cut-energy-minimization-x-radiology-lesion-segmentation-qc.yaml"
    },
    {
      "id": "b-residual-learning-x-automated-retinal-screening-robustness",
      "title": "Residual learning links deep optimization stability with scalable retinal screening pipelines.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Residual skip pathways mitigate optimization degradation in medical image classifiers and can improve robustness in retinal screening workflows.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-self-supervised-residual-pretraining-reduces-retinal-screening-false-negatives"
      ],
      "communication_gap": "Computer-vision model development targets benchmark accuracy, while screening programs prioritize low false negatives and calibration governance.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1109/CVPR.2016.90",
          "note": "Deep Residual Learning for Image Recognition."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/computer-vision-radiology/b-residual-learning-x-automated-retinal-screening-robustness.yaml"
    },
    {
      "id": "b-unet-segmentation-x-histopathology-quantification-workflows",
      "title": "U-Net segmentation architectures bridge biomedical image analysis and reproducible histopathology quantification.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Encoder-decoder inductive biases in U-Net provide a transferable mapping between pixel-level context aggregation and pathology region quantification.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-stain-normalized-unet-training-improves-cross-site-pathology-consistency"
      ],
      "communication_gap": "Computer-vision benchmarks emphasize Dice-like metrics, while pathology practice needs site-robust, decision-linked quantification.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1007/978-3-319-24574-4_28",
          "note": "U-Net architecture for biomedical image segmentation."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/computer-vision-radiology/b-unet-segmentation-x-histopathology-quantification-workflows.yaml"
    },
    {
      "id": "b-liquid-crystal-cell-membranes",
      "title": "Cell membranes are two-dimensional liquid crystals — lipid bilayers exhibit orientational order without positional order, obeying Frank elastic energy, with membrane proteins as topological defects and lipid-raft phase separation as a liquid-liquid phase transition in a 2D system.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The physics of liquid crystals — materials with orientational order but no positional order (nematic phase) — applies directly to cell membranes.\n1. Frank elastic energy for membranes. The deformation energy of a nematic\n   liquid crystal is F = ½∫[K₁(∇·n̂)² + K₂(n̂·∇×n̂)² + K₃(n̂×∇×n̂)²] dV,\n   whe",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-membrane-defects-protein-clustering"
      ],
      "communication_gap": "Soft matter physicists (studying LCs) and cell biologists (studying membranes) rarely collaborate. The mathematical equivalence is well-known in biophysics but underutilized: biologists often describe lipid rafts phenomenologically without connecting to the quantitative LC elastic energy framework that would predict defect interactions and protein clustering.\n",
      "translation_table": [
        {
          "field_a_term": "nematic director field n̂ (liquid crystal physics)",
          "field_b_term": "lipid acyl chain orientation order parameter (cell biology)",
          "note": "Both describe local orientational order without positional order"
        },
        {
          "field_a_term": "Frank elastic constants K₁, K₂, K₃ (splay, twist, bend)",
          "field_b_term": "Helfrich bending modulus κ and Gaussian curvature modulus κ_G",
          "note": "Helfrich energy is Frank energy restricted to a 2D surface"
        },
        {
          "field_a_term": "topological defect ±½ in nematic (liquid crystal)",
          "field_b_term": "integral membrane protein with local lipid order distortion",
          "note": "Proteins perturb the lipid director field analogous to LC defects"
        },
        {
          "field_a_term": "nematic-to-isotropic phase transition (LC physics)",
          "field_b_term": "liquid-ordered to liquid-disordered transition (cell membrane)",
          "note": "The gel-to-fluid transition of membranes is the biological analog"
        },
        {
          "field_a_term": "liquid-liquid phase coexistence (binary LC mixture)",
          "field_b_term": "lipid raft / non-raft phase separation in plasma membrane",
          "note": "Raft domains are liquid-ordered phase islands in a liquid-disordered sea"
        },
        {
          "field_a_term": "electro-optic effect (LCD)",
          "field_b_term": "electric-field-driven lipid redistribution / electroporation",
          "note": "External electric fields reorder both LC molecules and membrane lipids"
        }
      ],
      "references": [
        {
          "doi": "10.1515/znc-1973-11-1209",
          "note": "Helfrich (1973) Z Naturforsch 28c:693 — elastic properties of lipid bilayers"
        },
        {
          "doi": "10.1016/0022-5193(70)90052-5",
          "note": "Canham (1970) J Theor Biol 26:61 — minimum energy of red blood cell shape"
        },
        {
          "doi": "10.1126/science.175.4023.720",
          "note": "Singer & Nicolson (1972) Science 175:720 — fluid mosaic model of cell membranes"
        },
        {
          "url": "https://global.oup.com/academic/product/the-physics-of-liquid-crystals-9780198517856",
          "note": "de Gennes & Prost (1993) The Physics of Liquid Crystals — Oxford University Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/condensed-matter-biology/b-liquid-crystal-cell-membranes.yaml"
    },
    {
      "id": "b-structural-color-photonic-crystal-band-gaps",
      "title": "The structural colors of butterfly wings, beetle shells, and bird feathers arise from nanoscale photonic crystal structures that produce photonic band gaps and thin-film interference, connecting evolutionary biology to condensed matter physics and photonics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Biological nanostructures (opal-like arrays, gyroid morphologies, thin-film stacks) function as photonic crystals: periodic dielectric structures with lattice constants comparable to visible light wavelengths (200–700 nm) that exhibit photonic band gaps — frequency ranges where light propagation is ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-photonic-crystal-self-assembly-block-copolymer"
      ],
      "communication_gap": "Evolutionary biologists studying structural color and condensed matter physicists studying photonic crystals publish in separate literatures; the transfer of knowledge from condensed matter photonics to understanding the evolution and function of biological colors is growing but remains incomplete; most biologists are unfamiliar with photonic band gap calculations.\n",
      "translation_table": [
        {
          "field_a_term": "butterfly wing nanostructure (biology)",
          "field_b_term": "2-D or 3-D photonic crystal lattice (condensed matter)",
          "note": "Chitin-air periodic arrays with period ~150 nm act as photonic crystals for visible light"
        },
        {
          "field_a_term": "iridescence / angle-dependent color (biology)",
          "field_b_term": "photonic band gap angular dispersion (condensed matter)",
          "note": "Bragg reflection angle gives iridescence; quasi-disorder reduces angle-dependence"
        },
        {
          "field_a_term": "gyroid sponge structure in butterfly wing scales (biology)",
          "field_b_term": "gyroid photonic crystal with cubic symmetry (condensed matter)",
          "note": "Biological gyroids self-assemble via lipid block-copolymer phase separation"
        },
        {
          "field_a_term": "matte (non-iridescent) structural color (biology)",
          "field_b_term": "photonic glass / amorphous photonic structure (condensed matter)",
          "note": "Short-range order without long-range periodicity gives angle-independent color"
        }
      ],
      "references": [
        {
          "doi": "10.1063/1.1564060",
          "note": "Vukusic & Sambles (2003) — photonic structures in biology"
        },
        {
          "doi": "10.1038/nature07609",
          "note": "Dufresne et al. (2009) — self-assembly of amorphous biophotonic nanostructures"
        },
        {
          "doi": "10.1126/science.1152592",
          "note": "Shawkey & Hill (2006) — significance of a basal melanin layer to production of non-iridescent structural plumage color"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/condensed-matter-biology/b-structural-color-photonic-crystal-band-gaps.yaml"
    },
    {
      "id": "b-rock-magnetism-spin-ordering-domains",
      "title": "The remanent magnetization recorded in ferromagnetic minerals (magnetite, hematite) in rocks follows the same Heisenberg exchange Hamiltonian and micromagnetic domain theory that governs magnetic storage materials in condensed matter physics: domain wall energy, coercivity, and thermoremanent acquisition are quantitatively predicted by the same Stoner-Wohlfarth and Landau-Lifshitz-Gilbert frameworks used in magnetic recording research",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Rock magnetism applies condensed matter magnetic theory to geological materials: a single-domain magnetite grain acquires thermoremanent magnetization (TRM) by passing through its Curie temperature (578°C) in a field, and the blocking temperature T_B at which thermal fluctuations no longer randomize",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-rock-magnetism-single-domain-blocking-temperature"
      ],
      "communication_gap": "Paleomagnetists use rock magnetism empirically to date and correlate geological sections while condensed matter physicists develop fundamental magnetic domain theory in synthesized materials; the same quantitative models are used but rarely cross-cited, and ab initio calculations of iron oxide magnetic properties are not routinely incorporated into operational paleomagnetic protocols.",
      "translation_table": [
        {
          "field_a_term": "natural remanent magnetization (NRM) in paleomagnetic sample (geology)",
          "field_b_term": "remanent magnetization after field removal in magnetic storage medium (condensed matter)",
          "note": "Both encode a magnetic field at the time of acquisition; both are described by the same blocking temperature theory"
        },
        {
          "field_a_term": "blocking temperature T_B of magnetic mineral grain (geology)",
          "field_b_term": "thermal stability threshold KV/k_BT in single-domain particle (condensed matter)",
          "note": "T_B sets the temperature below which NRM is stable for geological time; same formula as data retention in HDDs"
        },
        {
          "field_a_term": "coercivity H_c of paleomagnetic mineral (geology)",
          "field_b_term": "switching field in Stoner-Wohlfarth single-domain model (condensed matter)",
          "note": "H_c = 2K/mu_0 M_s for a uniaxial single-domain grain; same physics determines HDD coercive field"
        },
        {
          "field_a_term": "Verwey transition at 120 K in magnetite (geology)",
          "field_b_term": "structural phase transition changing magnetic anisotropy and domain structure (condensed matter)",
          "note": "Verwey transition alters magnetite's crystal structure, changing K and domain wall structure, recognized identically in rock magnetism and condensed matter"
        }
      ],
      "references": [
        {
          "doi": "10.1029/JB069i002p00467",
          "note": "Neel (1955) - some theoretical aspects of rock magnetism (blocking temperature theory)"
        },
        {
          "doi": "10.1146/annurev.ea.19.050191.001313",
          "note": "Dunlop & Ozdemir (1997) - rock magnetism: fundamentals and frontiers (comprehensive bridge text)"
        },
        {
          "doi": "10.1103/PhysRev.96.1335",
          "note": "Stoner & Wohlfarth (1948) - a mechanism of magnetic hysteresis in heterogeneous alloys (SW model)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/condensed-matter-geology/b-rock-magnetism-spin-ordering-domains.yaml"
    },
    {
      "id": "b-thermobarometry-pt-path-thermodynamics",
      "title": "Metamorphic thermobarometry reconstructs the pressure-temperature history of rocks using equilibrium thermodynamics of mineral assemblages — the same chemical potential and Gibbs free energy minimisation that governs phase diagrams in materials science and physical chemistry, making metamorphic petrology an in-situ geological record of crustal thermodynamic state evolution.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "When rocks are buried in subduction zones or mountain belts, they record their pressure-temperature (P-T) history through the stable mineral assemblages that crystallise at each condition. Thermobarometry reads this record by inverting equilibrium thermodynamics: the compositions of coexisting phase",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "",
      "translation_table": [
        {
          "field_a_term": "Gibbs free energy minimisation",
          "field_b_term": "thermodynamic modelling of mineral assemblages (Perple_X)",
          "note": "Identical mathematical operation — find the phase assemblage that minimises total G"
        },
        {
          "field_a_term": "phase diagram",
          "field_b_term": "pseudosection (P-T diagram with predicted mineral assemblages)",
          "note": "A pseudosection is simply a phase diagram in P-T space for a fixed bulk composition"
        },
        {
          "field_a_term": "activity-composition model",
          "field_b_term": "mineral solid solution model (e.g., garnet a-x relations)",
          "note": "Describes how chemical potential varies with composition within a phase"
        },
        {
          "field_a_term": "CALPHAD database",
          "field_b_term": "internally consistent thermodynamic database (Holland & Powell)",
          "note": "Both are curated free energy databases for multicomponent systems"
        }
      ],
      "references": [
        {
          "doi": "10.1093/petrology/egl062",
          "note": "Holland & Powell (2004) — An improved and extended internally consistent thermodynamic dataset"
        },
        {
          "doi": "10.1002/9781119068099",
          "note": "Spear (1993) Metamorphic Phase Equilibria and Pressure-Temperature-Time Paths"
        },
        {
          "doi": "10.1144/GSL.SP.2004.235.01.05",
          "note": "Powell & Holland (2008) — On thermobarometry"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/condensed-matter-geology/b-thermobarometry-pt-path-thermodynamics.yaml"
    },
    {
      "id": "b-moire-patterns-commensurability-superlattice",
      "title": "Moiré superlattices in twisted bilayer graphene arise from the incommensurability of two periodic lattices, a mathematical phenomenon governing commensurate- incommensurate transitions and the Frenkel-Kontorova model, connecting condensed matter physics to number theory and dynamical systems.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "When two hexagonal lattices are twisted by angle θ, the moiré pattern has wavelength λ_M = a/(2sin(θ/2)) that diverges as θ→0. Commensurability — whether the ratio of lattice constants is rational — determines whether the superlattice is periodic or quasiperiodic (Penrose-like). At the \"magic angle\"",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-moire-flat-band-strongly-correlated-universality"
      ],
      "communication_gap": "Condensed matter physicists studying moiré systems and mathematicians studying quasiperiodic systems / KAM theory rarely collaborate; the Frenkel-Kontorova literature is known to condensed matter theorists but the algebraic number theory perspective is largely unexplored in the moiré context.\n",
      "translation_table": [
        {
          "field_a_term": "twisted bilayer graphene magic angle (condensed matter)",
          "field_b_term": "resonant KAM torus / critical twist parameter (mathematics)",
          "note": "Magic angle is where the moiré flat band touches — a resonance condition in the Hamiltonian"
        },
        {
          "field_a_term": "moiré superlattice periodicity (condensed matter)",
          "field_b_term": "rational/irrational ratio of periods (number theory)",
          "note": "Commensurate = rational ratio = periodic superlattice; irrational = quasicrystal"
        },
        {
          "field_a_term": "Frenkel-Kontorova chain of atoms (condensed matter)",
          "field_b_term": "standard map / Chirikov map iteration (dynamical systems)",
          "note": "The FK Hamiltonian generates the standard map; KAM breakdown = Peierls-Nabarro barrier"
        },
        {
          "field_a_term": "correlated insulator / superconductor at magic angle (condensed matter)",
          "field_b_term": "flat-band localization at resonance (mathematics)",
          "note": "Flat bands signal vanishing kinetic energy — a degeneracy analogous to resonance in maps"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature26160",
          "note": "Cao et al. (2018) — unconventional superconductivity in magic-angle graphene"
        },
        {
          "doi": "10.1038/nature26154",
          "note": "Cao et al. (2018) — correlated insulator in magic-angle twisted bilayer graphene"
        },
        {
          "doi": "10.1103/PhysRevLett.56.2237",
          "note": "Aubry & André (1980) — quasiperiodic lattice and metal-insulator transition"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/condensed-matter-mathematics/b-moire-patterns-commensurability-superlattice.yaml"
    },
    {
      "id": "b-bose-einstein-condensation-superfluidity-coherence",
      "title": "Bose-Einstein condensation, predicted by quantum statistics, underlies superfluidity in helium-4 and ultracold atomic gases: when bosons macroscopically occupy a single quantum state, off-diagonal long-range order and phase coherence produce dissipationless flow and quantized vortices.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "In a BEC, the N-particle wavefunction factorizes: Ψ(r₁,...,rN) ≈ ∏φ₀(rᵢ), where φ₀ is the single-particle ground state condensate wavefunction. The superfluid order parameter ψ(r) = √(n_s(r))·e^{iθ(r)} (n_s = superfluid density, θ = phase) satisfies the Gross-Pitaevskii equation: iℏ∂ψ/∂t = [-ℏ²∇²/2m",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-room-temperature-superfluidity-engineered-materials"
      ],
      "communication_gap": "Quantum optics / AMO physicists working with ultracold BEC and condensed matter physicists studying superfluid helium and superconductors share deep mathematical structures (Bogoliubov theory, vortex dynamics) but publish in separate communities; the connection was explicit from 1995 when dilute-gas BEC was first achieved.\n",
      "translation_table": [
        {
          "field_a_term": "Bose-Einstein condensate (quantum physics)",
          "field_b_term": "superfluid component of helium-4 (condensed matter)",
          "note": "Below Tλ = 2.17 K, the condensate fraction drives the superfluid density"
        },
        {
          "field_a_term": "macroscopic occupation of ground state (quantum physics)",
          "field_b_term": "off-diagonal long-range order ODLRO (condensed matter)",
          "note": "ODLRO is the rigorous criterion for superfluidity; BEC implies ODLRO in ideal Bose gas"
        },
        {
          "field_a_term": "condensate wavefunction / order parameter (quantum physics)",
          "field_b_term": "superfluid order parameter ψ(r) (condensed matter)",
          "note": "Same mathematical object; Gross-Pitaevskii equation governs both contexts"
        },
        {
          "field_a_term": "phase coherence length (quantum physics)",
          "field_b_term": "superfluid coherence length ξ (condensed matter)",
          "note": "ξ sets the size of vortex cores and is inversely related to interaction strength"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.269.5221.198",
          "note": "Anderson et al. (1995) - observation of BEC in dilute atomic vapor"
        },
        {
          "doi": "10.1103/RevModPhys.71.463",
          "note": "Dalfovo et al. (1999) - theory of BEC in trapped gases (Gross-Pitaevskii review)"
        },
        {
          "doi": "10.1103/PhysRev.104.576",
          "note": "Penrose & Onsager (1956) - off-diagonal long-range order (ODLRO) and BEC in liquid helium"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/condensed-matter-particle-physics/b-bose-einstein-condensation-superfluidity-coherence.yaml"
    },
    {
      "id": "b-correlated-electron-systems-hubbard-model",
      "title": "The Hubbard model from quantum physics provides the minimal theoretical bridge between condensed matter physics and quantum many-body theory: it captures the competition between electron kinetic energy (band formation) and on-site Coulomb repulsion (Mott localization), explaining the emergence of Mott insulators, high-Tc superconductivity, and magnetic ordering.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Hubbard Hamiltonian H = -t∑_{<i,j>,σ}(c†_{iσ}c_{jσ} + h.c.) + U∑_i n_{i↑}n_{i↓} encodes a competition between kinetic energy (hopping t) and on-site repulsion U. The dimensionless ratio U/t determines the phase: U/t << 1 → metallic band behavior (Bloch waves, Fermi liquid); U/t >> 1 → Mott insul",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-doped-hubbard-model-sufficient-for-cuprate-superconductivity"
      ],
      "communication_gap": "Condensed matter experimentalists studying copper oxides and nickelates and quantum many-body theorists solving the Hubbard model share the same Hamiltonian but different toolkits (ARPES vs exact diagonalization vs DMFT vs QMC); the community is unified around the model but fragmented by method, and the sign problem in QMC prevents definitive numerical solutions at low temperature.\n",
      "translation_table": [
        {
          "field_a_term": "Mott insulator (condensed matter)",
          "field_b_term": "strongly-localized limit U/t >> 1 of Hubbard model (quantum physics)",
          "note": "Mott insulation arises from on-site Coulomb blockade; band theory incorrectly predicts a metal"
        },
        {
          "field_a_term": "correlated electron material (condensed matter)",
          "field_b_term": "strongly-correlated quantum many-body system (quantum physics)",
          "note": "Both refer to systems where U/t ≳ 1 and single-particle approximations fail"
        },
        {
          "field_a_term": "superexchange coupling J = 4t²/U (condensed matter)",
          "field_b_term": "second-order perturbation theory result in U/t >> 1 limit (quantum physics)",
          "note": "The antiferromagnetic Heisenberg model emerges from Hubbard at half-filling and large U"
        },
        {
          "field_a_term": "spectral weight transfer in Mott transition (condensed matter)",
          "field_b_term": "Hubbard bands and quasiparticle peak in DMFT (quantum physics)",
          "note": "DMFT captures the three-peak spectral function: lower/upper Hubbard bands + quasiparticle peak"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rspa.1963.0204",
          "note": "Hubbard (1963) - electron correlations in narrow energy bands (original Hubbard model)"
        },
        {
          "doi": "10.1103/RevModPhys.68.13",
          "note": "Georges et al. (1996) - DMFT and its applications to strongly correlated electron systems"
        },
        {
          "doi": "10.1103/RevModPhys.78.17",
          "note": "Lee et al. (2006) - doping a Mott insulator — physics of high-temperature superconductivity"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/condensed-matter-particle-physics/b-correlated-electron-systems-hubbard-model.yaml"
    },
    {
      "id": "b-symmetry-breaking-goldstone-bosons",
      "title": "Spontaneous symmetry breaking in any system with a continuous symmetry generates massless Goldstone bosons: the Goldstone theorem unifies pions in QCD, phonons in crystals, and magnons in ferromagnets under one mathematical framework",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Goldstone's theorem (1961): whenever a continuous symmetry group G is spontaneously broken to subgroup H, the theory contains exactly dim(G/H) massless Goldstone bosons (in Lorentz-invariant theories; modified in non-relativistic systems). Applications: (1) QCD chiral symmetry breaking SU(2)_L × SU(",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-symmetry-breaking-goldstone-bosons"
      ],
      "communication_gap": "Condensed matter physicists who work with phonons, magnons, and superconductors understand these as Goldstone bosons intuitively but rarely engage with the formal proof of Goldstone's theorem or the particle physics literature on symmetry breaking patterns. Particle physicists who derive the pion properties from chiral perturbation theory are not always aware of the exact analogies in condensed matter where the same counting rules and effective field theories apply.\n",
      "translation_table": [
        {
          "field_a_term": "pion (π⁺, π⁻, π⁰) in QCD",
          "field_b_term": "pseudo-Goldstone bosons of chiral symmetry breaking SU(2)_L×SU(2)_R→SU(2)_V",
          "note": "Non-zero mass because u,d quarks have small but non-zero mass — explicit symmetry breaking"
        },
        {
          "field_a_term": "acoustic phonon (3 polarizations in 3D crystal)",
          "field_b_term": "Goldstone bosons of spontaneously broken continuous translation symmetry",
          "note": "Dispersion ω∝|k| for acoustic phonons — massless (gapless) exactly from Goldstone theorem"
        },
        {
          "field_a_term": "Higgs field vacuum expectation value ⟨φ⟩≠0",
          "field_b_term": "spontaneous breaking of electroweak U(1)_Y×SU(2)_L symmetry",
          "note": "Anderson-Higgs mechanism: Goldstone bosons are 'eaten' by W±, Z to give them mass"
        },
        {
          "field_a_term": "order parameter field φ (magnetization, Cooper pair condensate)",
          "field_b_term": "the field that acquires a non-zero VEV in the broken-symmetry phase",
          "note": "Magnitude of φ measures degree of symmetry breaking; phase encodes Goldstone mode"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF02812722",
          "note": "Goldstone (1961) Field theories with superconductor solutions. Nuovo Cimento 19:154"
        },
        {
          "doi": "10.1103/PhysRev.127.965",
          "note": "Goldstone, Salam & Weinberg (1962) Broken symmetries. Phys Rev 127:965"
        },
        {
          "doi": "10.1146/annurev.ns.44.120194.002435",
          "note": "Weinberg (1995) The Quantum Theory of Fields Vol II — SSB and Goldstone theorem treatment"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/condensed-matter-particle-physics/b-symmetry-breaking-goldstone-bosons.yaml"
    },
    {
      "id": "b-topological-insulators-bulk-boundary",
      "title": "Topological insulators are bulk insulators whose conducting surface states are guaranteed by the bulk topological invariant via the bulk-boundary correspondence, making surface conduction robust against disorder.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The existence and protection of surface states in topological insulators is governed by the bulk-boundary correspondence: a non-trivial Z2 topological invariant computed from bulk Bloch wavefunctions guarantees gapless boundary modes that cannot be removed without closing the bulk gap or breaking ti",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-topological-insulator-disorder-robustness"
      ],
      "communication_gap": "Condensed matter physicists compute topological invariants numerically from band structures while algebraic topologists classify spaces abstractly; the shared language of K-theory is known to experts but rarely taught in physics graduate programs, leading to parallel developments.\n",
      "translation_table": [
        {
          "field_a_term": "Z2 topological invariant (condensed matter)",
          "field_b_term": "homotopy class of map from Brillouin zone torus to classifying space",
          "note": "The invariant counts parity of occupied Kramers pairs at time-reversal-invariant momenta"
        },
        {
          "field_a_term": "bulk band gap (condensed matter)",
          "field_b_term": "topological phase / gapped Hamiltonian (topology)",
          "note": "A gapped bulk Hamiltonian defines a map from BZ to space of gapped Hamiltonians"
        },
        {
          "field_a_term": "topologically protected surface states",
          "field_b_term": "boundary of a manifold with non-trivial topology",
          "note": "Bulk-boundary correspondence is the physical realization of topological boundary conditions"
        },
        {
          "field_a_term": "time-reversal symmetry protection (condensed matter)",
          "field_b_term": "symmetry class AII in Altland-Zirnbauer classification",
          "note": "Symmetry constrains the classifying space and hence the allowed topological invariants"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevB.76.045302",
          "note": "Fu, Kane & Mele (2007) - Z2 topological invariants for 3D topological insulators"
        },
        {
          "doi": "10.1103/RevModPhys.82.3045",
          "note": "Hasan & Kane (2010) - comprehensive review of topological insulators"
        },
        {
          "doi": "10.1103/PhysRevLett.95.226801",
          "note": "Kane & Mele (2005) - Z2 topological order and quantum spin Hall effect"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/condensed-matter-topology/b-topological-insulators-bulk-boundary.yaml"
    },
    {
      "id": "b-symplectic-integrators-x-long-horizon-control",
      "title": "Symplectic integration from geometric mechanics improves long-horizon optimal-control rollout fidelity by reducing numerical energy drift in Hamiltonian-like systems.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Long-horizon control and planning often propagate dynamics for thousands of steps; non-structure-preserving integrators can accumulate energy and phase drift that distorts optimization outcomes. Symplectic methods preserve geometric invariants better, aligning numerical trajectory quality with true",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-symplectic-controllers-preserve-energy-bounds-long-horizon"
      ],
      "communication_gap": "Control pipelines frequently treat integrators as interchangeable implementation detail, while geometric numerical analysis shows discretization choice can dominate long-horizon bias.\n",
      "translation_table": [
        {
          "field_a_term": "symplectic map preservation",
          "field_b_term": "reduced long-horizon model-bias in trajectory optimization",
          "note": "Better invariance preservation yields more reliable policy gradients and constraints."
        },
        {
          "field_a_term": "backward error analysis",
          "field_b_term": "modified-Hamiltonian interpretation of rollout error",
          "note": "Error behaves like nearby dynamics rather than arbitrary drift."
        },
        {
          "field_a_term": "near-conservation of invariants",
          "field_b_term": "improved feasibility for energy- or momentum-limited controllers",
          "note": "Constraint violation growth is slowed over planning horizons."
        }
      ],
      "references": [
        {
          "doi": "10.1017/S096249290200001X",
          "note": "Hairer (2002), geometric integration and long-time conservation properties."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/control-engineering-mathematics/b-symplectic-integrators-x-long-horizon-control.yaml"
    },
    {
      "id": "b-control-barrier-functions-x-safe-artificial-pancreas",
      "title": "Control barrier functions provide formal safety certificates for closed-loop artificial-pancreas insulin dosing.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Artificial pancreas control must optimize glucose while preventing dangerous lows. CBFs formalize safety sets and allow optimization-based controllers to enforce hard constraints in real time.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-cbf-enforced-insulin-constraints-prevent-severe-lows"
      ],
      "communication_gap": "Adjacent communities use different software stacks and validation norms, so mathematically equivalent tools are often rediscovered in parallel.",
      "translation_table": [
        {
          "field_a_term": "Safe set h(x) >= 0",
          "field_b_term": "Clinically safe glucose envelope",
          "note": "Safety set encodes avoidable hypoglycemia region."
        },
        {
          "field_a_term": "CBF constraint",
          "field_b_term": "Insulin command safety filter",
          "note": "Overrides aggressive commands when safety is threatened."
        },
        {
          "field_a_term": "Feasibility margin",
          "field_b_term": "Actuator/sensor delay tolerance",
          "note": "Determines when guarantees remain valid."
        }
      ],
      "references": [
        {
          "doi": "10.1098/rsta.1922.0009",
          "note": "Fisher (1922) estimation and information."
        },
        {
          "doi": "10.1017/S0962492910000061",
          "note": "Stuart (2010) Bayesian inverse-problem foundations."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/control-engineering-medicine/b-control-barrier-functions-x-safe-artificial-pancreas.yaml"
    },
    {
      "id": "b-control-lyapunov-functions-x-antibiotic-cycling-policies",
      "title": "Control Lyapunov function design connects nonlinear control guarantees to antibiotic cycling policy synthesis.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Antibiotic scheduling can be treated as a constrained control problem where Lyapunov-like resistance potentials are driven downward while preserving patient-level efficacy constraints.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-lyapunov-constrained-antibiotic-cycling-reduces-resistance-and-relapse"
      ],
      "communication_gap": "Communities use different terminology and validation conventions, masking transferable method equivalence.",
      "translation_table": [],
      "references": [
        {
          "url": "https://epubs.siam.org/doi/10.1137/S0363012993259931",
          "note": "Control Lyapunov function stabilization theorem."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/control-engineering-medicine/b-control-lyapunov-functions-x-antibiotic-cycling-policies.yaml"
    },
    {
      "id": "b-hamilton-jacobi-bellman-x-adaptive-radiotherapy",
      "title": "Hamilton-Jacobi-Bellman control equations provide a principled backbone for adaptive radiotherapy scheduling.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Hamilton-Jacobi-Bellman control equations provide a principled backbone for adaptive radiotherapy scheduling.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-hjb-derived-adaptive-fractionation-improves-tumor-control-toxicity-tradeoff"
      ],
      "communication_gap": "The two communities use different notation, benchmarks, and publication venues, which obscures transferable mathematical structure.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1016/S0005-1098(99)00152-3",
          "note": "Numerical HJB methods."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/control-engineering-medicine/b-hamilton-jacobi-bellman-x-adaptive-radiotherapy.yaml"
    },
    {
      "id": "b-variational-data-assimilation-x-personalized-glucose-forecasting",
      "title": "Variational data assimilation can transfer from geophysical forecasting to personalized glucose trajectory estimation.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Variational data assimilation can transfer from geophysical forecasting to personalized glucose trajectory estimation.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-variational-assimilation-derived-glucose-predictions-outperform-sliding-window-baselines"
      ],
      "communication_gap": "Domain-specific vocabularies and benchmark conventions obscure transferable mathematical structure.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1115/1.3662552",
          "note": "Foundational state-estimation framing used as transfer anchor."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/control-engineering-medicine/b-variational-data-assimilation-x-personalized-glucose-forecasting.yaml"
    },
    {
      "id": "b-phase-response-curves-x-adaptive-deep-brain-stimulation-timing",
      "title": "Phase-response-curve analysis can transfer from oscillator control to adaptive deep brain stimulation timing.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Phase-response-curve analysis can transfer from oscillator control to adaptive deep brain stimulation timing.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-phase-response-adaptive-dbs-reduces-off-target-neural-entrainment"
      ],
      "communication_gap": "Domain-specific vocabularies and benchmark conventions obscure transferable mathematical structure.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1007/BF00337259",
          "note": "Oscillation phase-response analysis groundwork for controlled perturbations."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/control-engineering-neurology/b-phase-response-curves-x-adaptive-deep-brain-stimulation-timing.yaml"
    },
    {
      "id": "b-kibble-zurek-morphogenesis",
      "title": "The Kibble-Zurek mechanism connects early-universe cosmology to embryonic symmetry breaking",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The Kibble-Zurek (KZ) mechanism — originally derived to predict defect density after the symmetry-breaking phase transitions that occurred microseconds after the Big Bang — makes quantitatively identical predictions when applied to spontaneous symmetry breaking in developing embryos (axis determinat",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-kibble-zurek-polarity-scaling"
      ],
      "communication_gap": "The KZ mechanism in biology is discussed in a handful of physics papers (Zurek himself noted the biological analogy in 2009) but has almost no penetration into mainstream developmental biology literature. The barrier is linguistic: embryologists use \"symmetry breaking\" informally to mean \"a cell picks a side,\" while physicists mean a specific order-parameter transition with a correlation length and universality class. A translation table (above) and a simple C. elegans protocol could bridge this gap within a single experimental paper.\n",
      "translation_table": [
        {
          "field_a_term": "cosmological phase transition (Higgs, QCD crossover)",
          "field_b_term": "cytoskeletal symmetry-breaking transition (Par polarisation, actomyosin cortex)"
        },
        {
          "field_a_term": "quench rate (universe cooling rate)",
          "field_b_term": "rate of cell-cycle progression / fertilisation-induced calcium wave speed"
        },
        {
          "field_a_term": "cosmic string / monopole defect",
          "field_b_term": "polarity error domain boundary / chirality reversal patch"
        },
        {
          "field_a_term": "defect density power-law (ξ ∝ τ_Q^ν/(1+νz))",
          "field_b_term": "polarity error rate as a function of temperature ramp / drug-induced quench speed"
        },
        {
          "field_a_term": "Ginzburg temperature (T_G — onset of mean-field breakdown)",
          "field_b_term": "cell-size threshold at which cortical fluctuations dominate over mean-field PAR dynamics"
        },
        {
          "field_a_term": "causal horizon (light cone at transition)",
          "field_b_term": "correlation length of PAR complex diffusion at symmetry-breaking onset"
        }
      ],
      "references": [
        {
          "arxiv": "cond-mat/9803199",
          "note": "Zurek, Dorner, Zoller (2005) — KZ mechanism in ion traps, establishing experimental accessibility"
        },
        {
          "arxiv": "quant-ph/0503021",
          "note": "KZ scaling in quantum phase transitions — generalises the mechanism beyond thermal transitions"
        },
        {
          "doi": "10.1038/nature08265",
          "note": "KZ defects observed in ultracold Bose gas — most direct experimental test to date"
        },
        {
          "arxiv": "0811.3743",
          "note": "Zurek (2009) — explicit note on biological symmetry breaking and KZ analogy"
        },
        {
          "doi": "10.7554/eLife.30606",
          "note": "C. elegans PAR polarisation kinetics — the embryonic system best suited for KZ quench experiments"
        }
      ],
      "last_reviewed": "2026-05-04",
      "file": "cross-domain/cosmology-biology/b-kibble-zurek-morphogenesis.yaml"
    },
    {
      "id": "b-cosmic-inflation-x-epidemic-phase-plane-expansion",
      "title": "Cosmic inflation stretches comoving scales exponentially when the scale factor accelerates — compartmental SIR-like epidemic models display transient phases where infected proportion grows approximately exponentially when R_eff≫1 — **this bridge is deliberately speculative metaphor**, not a physical reduction of cosmology to infectious disease; flag strongly before citing outside pedagogy.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Qualitative similarity: both domains plot autonomous flows on reduced phase planes where certain regimes exhibit rapid separation of trajectories resembling exponential widening — inflation uses slow-roll potentials and Friedmann equations; epidemics use mass-action bilinear incidence producing expo",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-shared-tangent-field-exponential-region-only-logarithmic-visual-overlap"
      ],
      "communication_gap": "Cosmologists fear sensational linkage between cosmology and disease; epidemiologists rarely benefit unless analogy strictly confined to dynamical-systems pedagogy — transparency essential.\n",
      "translation_table": [
        {
          "field_a_term": "Nearly exponential scale-factor growth during inflationary epoch",
          "field_b_term": "Early epidemic exponential rise of infections when susceptible pool abundant",
          "note": "**Speculative metaphor only** — causal mechanisms unrelated."
        },
        {
          "field_a_term": "Slow-roll parameters controlling exit from inflation",
          "field_b_term": "Interventions / depletion reducing effective reproduction number R_eff",
          "note": "Both describe exiting rapid-expansion regimes — analogy pedagogical only."
        },
        {
          "field_a_term": "Horizon problem alleviation via superluminal expansion (physics narrative)",
          "field_b_term": "Rapid spatial seeding before immunity builds (informal storytelling parallel)",
          "note": "Risk of misleading analogy — label explicitly for outreach ethics."
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevD.23.347",
          "note": "Guth (1981) — inflationary universe proposal"
        },
        {
          "doi": "10.1098/rspa.1927.0118",
          "note": "Kermack & McKendrick (1927) — deterministic epidemic modeling foundations"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cosmology-epidemiology/b-cosmic-inflation-x-epidemic-phase-plane-expansion.yaml"
    },
    {
      "id": "b-neural-cde-x-irregular-icu-trajectory-modeling",
      "title": "Neural controlled differential equations bridge rough-path theory and irregular ICU trajectory modeling for event forecasting under missingness.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy (to be empirically validated): neural CDEs translate irregularly sampled physiologic streams into continuous control paths, mirroring how rough-path summaries preserve temporal signal geometry beyond simple interpolation.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-neural-cde-models-improve-icu-event-lead-time"
      ],
      "communication_gap": "ML publications optimize predictive metrics, while ICU informatics prioritizes calibration, alarm burden, and workflow compatibility.",
      "translation_table": [
        {
          "field_a_term": "control path",
          "field_b_term": "interpolated physiologic stream",
          "note": "The control path encodes irregular observation timing and values."
        },
        {
          "field_a_term": "vector field dynamics",
          "field_b_term": "latent patient-state evolution",
          "note": "Both define continuous hidden-state updates driven by observed signals."
        },
        {
          "field_a_term": "signature-like temporal representation",
          "field_b_term": "clinically meaningful trajectory motifs",
          "note": "Temporal motifs can be mapped to deterioration patterns."
        }
      ],
      "references": [
        {
          "arxiv": "2005.08926",
          "note": "Neural Controlled Differential Equations for irregular time series."
        },
        {
          "url": "https://physionet.org/content/mimiciv/2.2/",
          "note": "MIMIC-IV critical-care dataset context."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/critical-care-machine-learning/b-neural-cde-x-irregular-icu-trajectory-modeling.yaml"
    },
    {
      "id": "b-dna-replication-x-error-correction",
      "title": "DNA replication x Error-correcting codes - polymerase proofreading as channel coding\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "DNA replication achieves an error rate of approximately 10^-10 per base through a three-stage error-correction pipeline (polymerase insertion selectivity 10^-5, 3'-to-5' exonuclease proofreading 10^-2, post-replication mismatch repair 10^-3) that is functionally analogous to concatenated error-correcti",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Molecular biologists studying DNA replication mechanisms (Kornberg 1956, Brutlag & Kornberg 1972) and information theorists developing error-correcting codes (Shannon 1948, Gallager 1962) built parallel frameworks for high-fidelity transmission; Hopfield (1974) and Ninio (1975) introduced kinetic proofreading connecting the fields, but the quantitative equivalence with turbo/LDPC codes and the information-theoretic capacity of the DNA replication channel has not been fully worked out.\n",
      "translation_table": [
        {
          "field_a_term": "DNA polymerase nucleotide selectivity (1 error per 10^5 bases) (biochemistry)",
          "field_b_term": "raw channel capacity / first-stage code rate in concatenated code (information theory)",
          "note": "Polymerase selectivity is the inner code; it provides a first reduction in error rate before proofreading"
        },
        {
          "field_a_term": "3 prime to 5 prime exonuclease proofreading (biochemistry)",
          "field_b_term": "syndrome decoding / error correction in outer code (information theory)",
          "note": "Proofreading detects and removes mismatched bases analogous to syndrome decoding; it checks consistency of the newly synthesized strand"
        },
        {
          "field_a_term": "mismatch repair (MMR) pathway (molecular biology)",
          "field_b_term": "iterative belief propagation / second decoding pass in turbo/LDPC codes (information theory)",
          "note": "MMR is a third coding layer that catches errors escaping proofreading; the three-layer architecture mirrors concatenated coding gain"
        },
        {
          "field_a_term": "DNA replication fidelity Q = product of three error rate reductions (molecular biology)",
          "field_b_term": "coding gain of concatenated error-correcting code (information theory)",
          "note": "The multiplicative error reduction (10^-5 x 10^-2 x 10^-3 = 10^-10) mirrors concatenated coding gain compounding individual stage gains"
        }
      ],
      "references": [
        {
          "doi": "10.1146/annurev.biochem.74.082803.133250",
          "note": "Kunkel & Erie (2005) - DNA mismatch repair; Annu Rev Biochem 74:681 — three-stage error correction pipeline"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cs-biology/b-dna-replication-x-error-correction.yaml"
    },
    {
      "id": "b-genetic-algorithm-x-natural-selection",
      "title": "Genetic algorithms x Natural selection — evolution as optimization\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Genetic algorithms (mutation, crossover, selection on fitness) are a direct mathematical abstraction of natural selection; Holland's schema theorem proves that GAs implicitly sample an exponential number of schemata simultaneously (building block hypothesis), explaining why evolution efficiently sea",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Computer scientists developing GAs from the 1960s-70s drew explicit inspiration from biology but the mathematical formalism (schema theorem, No Free Lunch) diverged from population genetics (Price equation, Fisher's fundamental theorem); cross-disciplinary collaboration between EC researchers and evolutionary biologists is growing but remains fragmented.\n",
      "translation_table": [
        {
          "field_a_term": "chromosome / binary string (genetic algorithm)",
          "field_b_term": "genome / DNA sequence (evolutionary biology)",
          "note": "The GA chromosome encodes a solution; the biological genome encodes a phenotype — both are subject to heritable variation"
        },
        {
          "field_a_term": "fitness function f(x) (genetic algorithm)",
          "field_b_term": "reproductive fitness W (biology)",
          "note": "Selection pressure in both cases preferentially propagates high-fitness individuals; the fitness landscape concept is shared"
        },
        {
          "field_a_term": "crossover / recombination operator (GA)",
          "field_b_term": "sexual recombination / meiosis (biology)",
          "note": "Both shuffle genetic material between parent solutions, generating novel combinations unexplored by single-parent variation"
        },
        {
          "field_a_term": "schema / building block (GA theory)",
          "field_b_term": "linkage disequilibrium / epistatic block (population genetics)",
          "note": "Schemata with above-average fitness grow exponentially; analogous to favorable haplotype blocks spreading in a population"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0004-3702(94)90132-5",
          "note": "Mitchell et al. (1994) - When will a genetic algorithm outperform hill climbing? A survey; AI Journal"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cs-biology/b-genetic-algorithm-x-natural-selection.yaml"
    },
    {
      "id": "b-neural-architecture-search-x-evolutionary-biology",
      "title": "Neural Architecture Search x Evolutionary Biology - NAS as artificial evolution\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Neural architecture search (NAS) algorithms - NEAT, evolutionary NAS, AmoebaNet - mimic biological evolution: networks are organisms, architectures are genotypes, validation accuracy is fitness, and mutations/crossovers generate variation; NAS rediscovered architectural motifs (skip connections, att",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Evolutionary computation and neural network design were connected from the beginning (Holland 1975, NEAT 2002) but NAS at scale became practical only with hardware acceleration (2017+); conversely, evolutionary biologists rarely use NAS as a computational model for studying evolvability and epistasis.\n",
      "translation_table": [
        {
          "field_a_term": "Neural network architecture (layer types, connections)",
          "field_b_term": "Genotype (genetic specification of organism)",
          "note": "The architecture specification (which layers exist, how they are connected, what operations they perform) is the genotype; the trained network weights given a fixed architecture are the phenotype expression of that genotype.\n"
        },
        {
          "field_a_term": "Validation accuracy (after training on fixed dataset)",
          "field_b_term": "Fitness (survival and reproductive success)",
          "note": "Validation accuracy is the fitness function; architectures achieving higher accuracy survive and reproduce (are selected for mutation/crossover); the fitness landscape determines evolutionary dynamics of the NAS search.\n"
        },
        {
          "field_a_term": "Architecture mutation (add layer, change operation type)",
          "field_b_term": "Point mutation / structural gene rearrangement",
          "note": "Architecture mutations are analogous to structural gene mutations; adding a skip connection resembles gene duplication; changing an operation type resembles a missense mutation affecting protein function.\n"
        },
        {
          "field_a_term": "Supernet (architecture sharing weight inheritance)",
          "field_b_term": "Common genetic heritage (conserved sequence motifs)",
          "note": "In one-shot NAS methods, subnets share weights from a supernet, analogous to how all organisms share conserved ancestral sequences; the shared weights encode architectural priors like common evolutionary history encodes conserved motifs.\n"
        }
      ],
      "references": [
        {
          "doi": "10.48550/arXiv.1802.01548",
          "note": "Real et al. (2019) - regularized evolution for image classifier architecture search (AmoebaNet); AAAI 2019"
        },
        {
          "doi": "10.48550/arXiv.1712.00559",
          "note": "Liu et al. (2018) - Progressive neural architecture search (PNASNet); ECCV 2018"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cs-biology/b-neural-architecture-search-x-evolutionary-biology.yaml"
    },
    {
      "id": "b-sparse-coding-x-neural-basis",
      "title": "Compressed Sensing x Sparse Coding — neural basis functions as overcomplete dictionaries\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Visual cortex V1 simple cells learn sparse overcomplete representations of natural images (Olshausen & Field 1996) that are equivalent to dictionary learning in compressed sensing; the cortex solves a sparse approximation problem using a biologically plausible version of ISTA (iterative shrinkage-th",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Compressed sensing was developed by Candès-Romberg-Tao and Donoho (2006) as a mathematical theory of signal reconstruction; Olshausen & Field developed sparse coding for neuroscience in 1996 without the CS mathematical framework; the equivalence was recognized retrospectively, delaying a decade of potential cross-fertilization between neural coding theory and compressed sensing algorithms.\n",
      "translation_table": [
        {
          "field_a_term": "Overcomplete dictionary (compressed sensing)",
          "field_b_term": "Simple cell receptive field basis in V1",
          "note": "V1 simple cells form an overcomplete basis (more cells than pixels) with Gabor-like receptive fields; in CS, an overcomplete dictionary D enables sparse representation x ≈ Dα with α sparse — exactly the neural sparse code.\n"
        },
        {
          "field_a_term": "L1-norm minimization (LASSO)",
          "field_b_term": "Sparse coding objective in V1",
          "note": "Olshausen & Field minimize E = ||x - Dα||² + λ||α||₁; the L1 penalty enforces sparsity and is equivalent to the LASSO estimator in compressed sensing.\n"
        },
        {
          "field_a_term": "ISTA (iterative shrinkage-thresholding)",
          "field_b_term": "Neural dynamics in V1 lateral inhibition network",
          "note": "ISTA (soft-thresholding iterations) can be implemented by a lateral inhibition network where neuron activations represent the sparse coefficients α; Rozell et al. (2008) showed this is a plausible V1 circuit mechanism.\n"
        },
        {
          "field_a_term": "Restricted isometry property (RIP)",
          "field_b_term": "Incoherence of neural basis functions",
          "note": "The RIP guarantees unique sparse recovery in CS; neural basis incoherence (Gabor filters at different orientations/frequencies are nearly orthogonal) ensures different stimuli activate different sparse subsets of V1 neurons.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1038/381607a0",
          "note": "Olshausen & Field (1996) — Emergence of simple-cell receptive field properties by learning a sparse code; Nature 381:607"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cs-biology/b-sparse-coding-x-neural-basis.yaml"
    },
    {
      "id": "b-game-theory-x-cryptography",
      "title": "Game theory x Cryptography - Nash equilibrium as protocol security\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Cryptographic protocol security (no computationally bounded adversary can profitably deviate) is a Nash equilibrium condition in a game where parties are rational agents maximizing expected utility; rational cryptography formalizes cryptographic security as computational game theory — zero-knowledge",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Cryptographers developing protocol security proofs (Goldwasser, Micali 1982) and game theorists developing mechanism design (Hurwicz, Maskin, Myerson — Nobel 2007) worked independently; the rational cryptography program (Halpern & Teague 2004, Katz 2008) formalized the connection, enabling blockchain incentive mechanism design — but the full synthesis (when game-theoretic equilibrium coincides with cryptographic security, and when it does not) remains an active research area.\n",
      "translation_table": [
        {
          "field_a_term": "cryptographic security under computational assumptions (cryptography)",
          "field_b_term": "Nash equilibrium in game with computationally bounded strategies (game theory)",
          "note": "Security = no polynomial-time adversary can improve utility by deviating; this is Nash equilibrium where strategies are PPT algorithms"
        },
        {
          "field_a_term": "zero-knowledge proof (ZKP) soundness and zero-knowledge properties (cryptography)",
          "field_b_term": "incentive-compatible mechanism where prover cannot gain by lying (mechanism design)",
          "note": "ZKP soundness = no profitable deviation for dishonest prover; zero-knowledge = honest verifier gains no information beyond validity"
        },
        {
          "field_a_term": "blockchain mining Nash equilibrium (selfish mining) (cryptography/distributed systems)",
          "field_b_term": "game-theoretic equilibrium of mining pool strategy game (game theory)",
          "note": "Eyal & Sirer (2014) showed honest Bitcoin mining is not a Nash equilibrium: selfish mining is a profitable deviation that destabilizes blockchain security — game theory predicts the attack"
        },
        {
          "field_a_term": "protocol composition in multi-party computation (cryptography)",
          "field_b_term": "iterated game / mechanism composition in game theory (game theory)",
          "note": "Universal composability (Canetti 2001) is the cryptographic analog of Nash equilibrium in iterated games — protocols that are secure when run concurrently"
        }
      ],
      "references": [
        {
          "doi": "10.1007/3-540-44647-8_1",
          "note": "Goldreich (2001) - Foundations of Cryptography Vol. 1; security definitions as game-theoretic formulations"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cs-economics/b-game-theory-x-cryptography.yaml"
    },
    {
      "id": "b-mechanism-design-x-market-equilibrium",
      "title": "Mechanism design x Market equilibrium — incentive compatibility as stability\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Mechanism design (designing rules so truthful reporting is the dominant strategy) and competitive market equilibrium (where no agent can profitably deviate) are dual formulations of the same incentive compatibility condition; the revelation principle bridges them formally.\n",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Mechanism design emerged from game theory and computer science auction theory; competitive equilibrium from Walrasian economics. Myerson's Nobel work unified them formally but the connection is not standard in economics curricula.\n",
      "translation_table": [
        {
          "field_a_term": "Dominant strategy incentive compatibility (DSIC)",
          "field_b_term": "Nash equilibrium strategy profile",
          "note": "DSIC is the stronger condition (truthfulness dominant regardless of others); Nash equilibrium is the weaker condition (no profitable unilateral deviation) — both formalize incentive alignment.\n"
        },
        {
          "field_a_term": "Revelation principle (direct mechanism equivalence)",
          "field_b_term": "Arrow-Debreu competitive equilibrium existence",
          "note": "Both results guarantee existence of incentive-compatible outcomes under analogous conditions; the revelation principle is the mechanism design analog of Walras's existence theorem.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1287/moor.6.1.58",
          "note": "Myerson (1981) — Optimal auction design; Math Oper Res 6:58 — the foundational mechanism design paper"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cs-economics/b-mechanism-design-x-market-equilibrium.yaml"
    },
    {
      "id": "b-boolean-satisfiability-x-spin-glass",
      "title": "Boolean satisfiability x Spin glass — NP-hardness as geometric frustration\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The satisfiability phase transition (SAT/UNSAT boundary near clause-to-variable ratio alpha approximately 4.27 for 3-SAT) coincides with a spin-glass phase transition in the random K-SAT energy landscape; NP-hardness emerges from the same frustration mechanism as glassy dynamics.\n",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Complexity theory and statistical physics developed independently; the cavity method from spin glass theory (Mezard & Parisi) was only imported into SAT solving (survey propagation) in the early 2000s.\n",
      "translation_table": [
        {
          "field_a_term": "Boolean variable assignment (+1/-1)",
          "field_b_term": "Ising spin state (up/down)",
          "note": "Each Boolean variable maps directly to a spin; clauses map to interaction terms in a multi-spin Hamiltonian.\n"
        },
        {
          "field_a_term": "SAT/UNSAT phase boundary at alpha_c",
          "field_b_term": "Spin glass freezing temperature T_g",
          "note": "Both mark the onset of exponential search complexity; the algorithmic phase transition mirrors the equilibrium phase transition in the energy landscape.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1073287",
          "note": "Mezard, Parisi & Zecchina (2002) — analytic and algorithmic solution of random satisfiability problems; Science 297:812"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cs-math/b-boolean-satisfiability-x-spin-glass.yaml"
    },
    {
      "id": "b-compressed-sensing-x-sparse-recovery",
      "title": "Compressed sensing x Sparse signal recovery — underdetermined systems and L1 minimization\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Compressed sensing proves that a sparse signal in R^n can be exactly recovered from O(k log n) random linear measurements (far fewer than n) by L1 minimization; this connects the restricted isometry property (RIP) of random matrices to convex optimization geometry, enabling MRI acceleration by 10x a",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Signal processing engineers and pure mathematicians studying convex geometry worked in isolation until Candès, Romberg, and Tao (2006) and Donoho (2006) simultaneously showed that L1 minimization achieves exact sparse recovery — a result with implications neither community had fully anticipated.\n",
      "translation_table": [
        {
          "field_a_term": "sparsity k in signal space (signal processing)",
          "field_b_term": "number of non-zero coefficients in a basis (mathematics)",
          "note": "A signal is k-sparse if at most k of its n basis coefficients are nonzero; sparsity is the key structural assumption"
        },
        {
          "field_a_term": "random measurement matrix Φ (compressed sensing)",
          "field_b_term": "random linear map satisfying RIP (mathematics)",
          "note": "Gaussian or Bernoulli random matrices satisfy the RIP with overwhelming probability, guaranteeing recovery"
        },
        {
          "field_a_term": "L1 minimization / LASSO (optimization)",
          "field_b_term": "convex relaxation of L0 sparse constraint (mathematics)",
          "note": "L1 is the tightest convex surrogate for sparsity; the L1 ball's geometry causes solutions to be corner-sparse"
        }
      ],
      "references": [
        {
          "doi": "10.1109/TIT.2006.871582",
          "note": "Candès & Tao (2006) - Near-optimal signal recovery from random projections; IEEE Trans Inf Theory 52:5406"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cs-math/b-compressed-sensing-x-sparse-recovery.yaml"
    },
    {
      "id": "b-graph-neural-network-x-spectral-graph-theory",
      "title": "Graph neural networks x Spectral graph theory — convolution on irregular domains\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Graph convolutional networks perform convolution in the spectral domain of the graph Laplacian; filters are polynomials of eigenvalues (spectral filters), and message passing is equivalent to diffusion on the graph governed by the heat kernel — connecting deep learning to spectral graph theory and e",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Graph theorists studying spectral properties and machine learning researchers developing neural architectures developed related tools without much cross-pollination; spectral graph theory frameworks for GNNs emerged in 2013-2016, but the deeper connections to Riemannian geometry and diffusion PDEs on manifolds are still being developed by a small interdisciplinary community.\n",
      "translation_table": [
        {
          "field_a_term": "graph Laplacian L = D - A (graph theory)",
          "field_b_term": "discretized Laplace-Beltrami operator on manifold (differential geometry)",
          "note": "The graph Laplacian generalizes the continuous Laplacian; its eigenvectors are the graph Fourier basis analogous to Fourier modes on a torus"
        },
        {
          "field_a_term": "spectral filter h(lambda_i) (graph neural network)",
          "field_b_term": "function of Laplacian eigenvalues (spectral graph theory)",
          "note": "GCN filters are polynomials of L applied in the spectral domain; Chebyshev polynomial filters localize computation to k-hop neighborhoods"
        },
        {
          "field_a_term": "message passing / neighborhood aggregation (GNN)",
          "field_b_term": "diffusion on graph via heat equation (graph theory / physics)",
          "note": "Each GNN layer is one step of a discretized diffusion process; the number of layers controls the diffusion radius"
        },
        {
          "field_a_term": "graph isomorphism (graph theory)",
          "field_b_term": "Weisfeiler-Leman graph isomorphism test (combinatorics)",
          "note": "Standard message-passing GNNs are bounded in power by the 1-WL test; more powerful GNNs require higher-order WL hierarchies"
        }
      ],
      "references": [
        {
          "doi": "10.48550/arXiv.1609.02907",
          "note": "Kipf & Welling (2017) - Semi-supervised classification with graph convolutional networks; arXiv:1609.02907"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cs-math/b-graph-neural-network-x-spectral-graph-theory.yaml"
    },
    {
      "id": "b-pagerank-x-markov-chain",
      "title": "PageRank x Markov chain stationary distribution - web ranking as random walk\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Google's PageRank algorithm computes the stationary distribution of a random walk on the web graph with teleportation probability alpha; this is exactly the left eigenvector of the Google matrix G = alpha * H + (1-alpha) * e * v^T (Markov transition matrix), making web search a problem in Markov cha",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Web engineers developing search ranking (Brin and Page 1998) and probabilists studying Markov chains and random walks developed independent frameworks; the connection was implicit in the original PageRank paper but the Markov chain interpretation with Perron-Frobenius theory was formalized by Langville and Meyer (2006) — enabling mixing time analysis, sensitivity analysis, and distributed algorithms from Markov chain theory to improve practical search ranking.\n",
      "translation_table": [
        {
          "field_a_term": "PageRank score pi(v) of web page v (computer science)",
          "field_b_term": "stationary probability of state v in ergodic Markov chain (mathematics)",
          "note": "PageRank is the stationary distribution; high-rank pages are those the random surfer spends most time on at stationarity"
        },
        {
          "field_a_term": "hyperlink from page i to page j (web graph)",
          "field_b_term": "transition probability p(i -> j) in Markov chain (probability theory)",
          "note": "Each hyperlink defines a transition probability; pages with many outlinks have uniform transition probabilities (equal weight)"
        },
        {
          "field_a_term": "teleportation probability (1-alpha) to random page (PageRank)",
          "field_b_term": "mixing term ensuring irreducibility and aperiodicity (Markov chain theory)",
          "note": "Teleportation makes the chain irreducible (all states reachable) and aperiodic, guaranteeing unique stationary distribution"
        },
        {
          "field_a_term": "power iteration for PageRank convergence (numerical linear algebra)",
          "field_b_term": "Markov chain Monte Carlo (MCMC) convergence to stationary distribution (statistics)",
          "note": "PageRank power iteration is MCMC for the Google matrix stationary distribution; convergence rate = second eigenvalue"
        }
      ],
      "references": [
        {
          "doi": "10.1016/S0169-7552(98)00110-X",
          "note": "Brin & Page (1998) - The anatomy of a large-scale hypertextual web search engine; Computer Networks 30:107"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cs-math/b-pagerank-x-markov-chain.yaml"
    },
    {
      "id": "b-reinforcement-learning-x-bellman-equation",
      "title": "Reinforcement learning x Bellman equation - optimal control as dynamic programming\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Reinforcement learning (Q-learning, policy gradients, TD-learning) solves the Bellman optimality equation V*(s) = max_a [R(s,a) + gamma E[V*(s')]] via function approximation; this connects RL to Bellman's dynamic programming principle, the Hamilton-Jacobi-Bellman PDE of continuous optimal control, a",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Bellman (1957) developed dynamic programming for control theory and Watkins (1989) developed Q-learning independently in AI; the connection was clear in early RL work (Sutton & Barto 1988) but the explicit identification of deep RL with HJB PDEs and Pontryagin's principle was systematized only later — enabling continuous control algorithms (DDPG, SAC) that directly import optimal control theory into the RL framework.\n",
      "translation_table": [
        {
          "field_a_term": "Q-function Q(s,a) in Q-learning (reinforcement learning)",
          "field_b_term": "value function V*(x) in optimal control / HJB equation (control theory)",
          "note": "Q(s,a) is the action-value function; the Bellman optimality equation Q*(s,a) = R + gamma max Q*(s',a') is the discrete HJB equation"
        },
        {
          "field_a_term": "temporal difference error delta = r + gamma V(s') - V(s) (RL)",
          "field_b_term": "Hamiltonian H(x, u, p) in Pontryagin maximum principle (control theory)",
          "note": "TD error is the discrete residual of the HJB equation; policy gradient methods compute the Hamiltonian gradient with respect to control u"
        },
        {
          "field_a_term": "discount factor gamma in RL (reinforcement learning)",
          "field_b_term": "time horizon / interest rate in discounted LQR (control theory)",
          "note": "Gamma = exp(-rho dt) where rho is the continuous discount rate; gamma -> 1 corresponds to infinite-horizon undiscounted optimal control"
        },
        {
          "field_a_term": "actor-critic architecture (RL)",
          "field_b_term": "state-costate decomposition in Pontryagin maximum principle (control theory)",
          "note": "Critic = value function = costate variable (Lagrange multiplier for dynamics); actor = policy = control law derived from Hamiltonian"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF00992698",
          "note": "Watkins & Dayan (1992) - Q-learning; Machine Learning 8:279 — proves convergence of Q-learning to Bellman optimum"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cs-math/b-reinforcement-learning-x-bellman-equation.yaml"
    },
    {
      "id": "b-satisfiability-x-constraint-propagation",
      "title": "Boolean satisfiability ↔ Constraint propagation — arc consistency as logical deduction",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Arc consistency algorithms (AC-3) in constraint satisfaction problems perform the same logical deduction as unit propagation in DPLL SAT solvers; both compute the fixpoint of a constraint propagation operator, connecting the CSP and SAT communities through a shared algebraic structure.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-satisfiability-x-constraint-propagation"
      ],
      "communication_gap": "The CSP and SAT communities evolved from different roots — CSP from AI planning and scheduling (1970s), SAT from computational complexity theory. The mathematical equivalence of unit propagation and arc consistency was not formalised until the late 1990s, despite decades of parallel development of very similar algorithms in separate conference venues.",
      "translation_table": [
        {
          "field_a_term": "arc consistency (AC-3) in CSP — remove domain values violating binary constraints",
          "field_b_term": "unit propagation in DPLL SAT — assign forced literals, simplify clauses",
          "note": "Both are fixpoint computations of a constraint propagation operator"
        },
        {
          "field_a_term": "CSP domain value removal (pruning infeasible assignments)",
          "field_b_term": "SAT unit clause simplification (forced literal propagation)",
          "note": "Domain reduction in CSP ≡ clause shortening in SAT"
        },
        {
          "field_a_term": "backtracking search in CSP (explore domain combinations)",
          "field_b_term": "DPLL branching (split on variable assignment)",
          "note": "Both implement systematic exhaustive search with propagation"
        }
      ],
      "references": [
        {
          "doi": "10.1145/368273.368557",
          "note": "Davis, Logemann & Loveland (1962) — DPLL algorithm; Commun ACM 5:394"
        },
        {
          "doi": "10.1016/0004-3702(77)90007-8",
          "note": "Mackworth (1977) — consistency in networks of relations (AC-3); Artificial Intelligence 8:99"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cs-math/b-satisfiability-x-constraint-propagation.yaml"
    },
    {
      "id": "b-social-network-centrality-x-eigenvector",
      "title": "Social Network Centrality x Eigenvector Methods — PageRank as Katz centrality\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Social network centrality measures (PageRank, Katz centrality, eigenvector centrality, HITS) are all variants of the dominant eigenvector of the adjacency or transition matrix; the attenuation factor in Katz centrality controls which length paths dominate, and all measures converge in the limit of l",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Sociologists developed centrality measures (Bonacich 1972) independently of computer scientists developing PageRank (Brin & Page 1998); the spectral unification of all centrality measures was recognized by physicists studying network theory (Newman 2003) but is still not universally taught in social network analysis courses.\n",
      "translation_table": [
        {
          "field_a_term": "PageRank vector (Google's link algorithm)",
          "field_b_term": "Dominant left eigenvector of Google matrix G = αA/k + (1-α)ee^T/n",
          "note": "The Google matrix G is a stochastic matrix; PageRank is the stationary distribution of the random walk on G — exactly the dominant eigenvector by the Perron-Frobenius theorem, with dangling node correction.\n"
        },
        {
          "field_a_term": "Katz centrality x = (I - αA)^(-1)·1",
          "field_b_term": "Resolvent of the adjacency matrix",
          "note": "Katz centrality sums contributions from all paths weighted by α^(path length); the geometric series (I - αA)^(-1) = Σ (αA)^k is the resolvent, converging when α < 1/λ_max (spectral radius condition).\n"
        },
        {
          "field_a_term": "HITS (hubs and authorities)",
          "field_b_term": "Dominant singular vectors of adjacency matrix",
          "note": "HITS computes hub scores (AᵀA eigenvector) and authority scores (AAᵀ eigenvector) — the dominant right and left singular vectors of the adjacency matrix A, equivalent to a rank-1 SVD approximation.\n"
        },
        {
          "field_a_term": "Eigenvector centrality (Bonacich 1972)",
          "field_b_term": "Dominant eigenvector of symmetric adjacency matrix",
          "note": "For undirected networks, eigenvector centrality x = λ_max^(-1)Ax is the dominant eigenvector; Perron-Frobenius theorem guarantees positive entries (all centralities are positive) for connected graphs.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1080/0022250X.1972.9989806",
          "note": "Bonacich (1972) — Factoring and weighting approaches to status scores and clique identification; J Math Sociol 2:113"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cs-math/b-social-network-centrality-x-eigenvector.yaml"
    },
    {
      "id": "b-spectral-clustering-x-graph-laplacian",
      "title": "Spectral clustering ↔ Graph Laplacian — eigenvectors as community indicators",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Spectral clustering finds community structure by computing eigenvectors of the graph Laplacian L = D - A; the Fiedler vector (second smallest eigenvector) bisects the graph at minimum cut, and k eigenvectors identify k communities — connecting machine learning clustering to algebraic graph theory an",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-spectral-clustering-x-graph-laplacian"
      ],
      "communication_gap": "Spectral methods for graph partitioning were developed in numerical linear algebra (Fiedler 1973) and VLSI circuit partitioning (Pothen 1990). Spectral clustering was independently rediscovered in machine learning (Shi & Malik 2000, Ng et al. 2001) without reference to the Fiedler algebraic graph theory literature. The Cheeger inequality connection — providing theoretical guarantees for spectral clustering — was imported from differential geometry (Cheeger 1970) to graph theory and only then to machine learning via the work of Spielman & Teng.",
      "translation_table": [
        {
          "field_a_term": "graph Laplacian L = D - A (degree matrix minus adjacency matrix)",
          "field_b_term": "diffusion operator on graph (discrete Laplacian for random walk)",
          "note": "L is positive semidefinite; eigenvalues 0 = λ₁ ≤ λ₂ ≤ ... ≤ λₙ; λ₂ = algebraic connectivity"
        },
        {
          "field_a_term": "Fiedler vector (eigenvector corresponding to λ₂, smallest nonzero eigenvalue)",
          "field_b_term": "minimum bisection of graph (Cheeger constant bound on minimum cut)",
          "note": "Sign of Fiedler vector coordinates gives optimal graph bisection; h(G) ≥ λ₂/2 (Cheeger inequality)"
        },
        {
          "field_a_term": "spectral clustering (k-means on k leading Laplacian eigenvectors)",
          "field_b_term": "low-dimensional random walk embedding (diffusion maps)",
          "note": "k-means in spectral domain ≡ k-means in diffusion distance space; both find community structure"
        },
        {
          "field_a_term": "spectral gap λ₂ (difference between second and first Laplacian eigenvalue)",
          "field_b_term": "random walk mixing time τ_mix = O(1/λ₂) (graph theory)",
          "note": "Large spectral gap ↔ fast mixing ↔ well-separated communities; small gap ↔ slow mixing ↔ community structure"
        }
      ],
      "references": [
        {
          "doi": "10.1007/s11222-007-9033-z",
          "note": "von Luxburg (2007) — a tutorial on spectral clustering; Statistics and Computing 17:395"
        },
        {
          "doi": "10.1023/B:STCO.0000006967.33477.20",
          "note": "Fiedler (1973) — algebraic connectivity of graphs; Czech Math J 23:298"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cs-math/b-spectral-clustering-x-graph-laplacian.yaml"
    },
    {
      "id": "b-cellular-automata-x-computational-universality",
      "title": "Cellular automata x Computational universality — Rule 110 as universal Turing machine\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Conway's Game of Life and Wolfram's Rule 110 one-dimensional cellular automaton are Turing-complete; the capacity for universal computation emerges from simple local rules without central coordination — demonstrating that computational universality is a phase of matter at the edge between order and ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Computer scientists studying computability theory and physicists studying emergent complexity in dynamical systems converged on cellular automata independently; Wolfram's (1984) systematic classification and Cook's (2004) proof of Rule 110 universality connected the communities, but the implications for physical computation limits and self-organization in nature are not widely appreciated outside complexity science.\n",
      "translation_table": [
        {
          "field_a_term": "Rule 110 class IV behavior (cellular automata)",
          "field_b_term": "computationally universal Turing machine (computer science)",
          "note": "Class IV CA exhibit complex non-periodic behavior sufficient for universal computation; Rule 110 was proven universal by Cook (2004)"
        },
        {
          "field_a_term": "CA phase transition (order-chaos boundary) (complexity science)",
          "field_b_term": "computational phase transition in random satisfiability (CS theory)",
          "note": "Both exhibit phase transitions between ordered (trivial), complex (universal), and chaotic regimes — the edge of chaos is optimal for computation"
        },
        {
          "field_a_term": "CA neighborhood and local rule (physics)",
          "field_b_term": "local computation / gate in circuit (computer science)",
          "note": "Each CA cell performs a local computation analogous to a logic gate; the global behavior emerges from massively parallel local operations"
        },
        {
          "field_a_term": "Garden of Eden state (cellular automata)",
          "field_b_term": "non-reversible computation (thermodynamics of computation)",
          "note": "Irreversible CA rules (not bijective) have Garden of Eden states, corresponding to Landauer-Bennett erasure cost in physical computation"
        }
      ],
      "references": [
        {
          "doi": "10.1002/cplx.6130010405",
          "note": "Wolfram (1984) - Universality and complexity in cellular automata; Physica D 10:1"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cs-physics/b-cellular-automata-x-computational-universality.yaml"
    },
    {
      "id": "b-neural-ode-x-dynamical-systems",
      "title": "Neural ODEs x Dynamical systems - continuous-depth networks as flow maps\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Neural ordinary differential equations (Chen et al. 2018) define network depth as continuous time in an ODE system dh/dt = f(h,t,theta); the network learns a vector field whose flow map transforms inputs to outputs at arbitrary time horizons, making deep learning a problem of learning dynamical syst",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Deep learning researchers developing ResNets (He et al. 2015) and mathematicians studying dynamical systems theory built separate frameworks for iterated transformations; Chen et al. (2018) recognized ResNets as Euler-method ODEs, enabling the continuous limit — but the full implications (Lyapunov stability for generalization, bifurcations in network behavior as depth increases, chaos as overfitting) are still being worked out at the intersection of deep learning theory and dynamical systems.\n",
      "translation_table": [
        {
          "field_a_term": "ResNet residual connection h_{l+1} = h_l + f(h_l) (deep learning)",
          "field_b_term": "Euler discretization of ODE dh/dt = f(h) (numerical methods/math)",
          "note": "A ResNet with small residuals is an Euler-discretized ODE; the continuous limit is a Neural ODE with learned vector field f"
        },
        {
          "field_a_term": "network depth L (deep learning)",
          "field_b_term": "integration time T in ODE solver (mathematics)",
          "note": "Depth maps to time; continuous networks have adaptive depth via ODE solver step size, equivalent to adaptive time-step integration"
        },
        {
          "field_a_term": "learned neural network parameters theta (machine learning)",
          "field_b_term": "vector field parameters of a parameterized dynamical system (dynamical systems)",
          "note": "Training a Neural ODE = finding the vector field (dynamical system) whose trajectories fit the data distribution"
        },
        {
          "field_a_term": "normalizing flow for density estimation (generative modeling)",
          "field_b_term": "continuous diffeomorphism / flow map of a smooth vector field (differential geometry)",
          "note": "Continuous normalizing flows are Neural ODEs for density estimation; the flow map provides exact log-likelihood via instantaneous change-of-variables"
        }
      ],
      "references": [
        {
          "doi": "10.48550/arXiv.1806.07366",
          "note": "Chen et al. (2018) - Neural ordinary differential equations; NeurIPS 2018 — introduces Neural ODEs as continuous-depth networks"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cs-physics/b-neural-ode-x-dynamical-systems.yaml"
    },
    {
      "id": "b-tensor-networks-x-quantum-states",
      "title": "Tensor networks ↔ Quantum many-body states — MPS as entanglement compression",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Matrix product states (MPS) and tensor network contractions provide an efficient classical representation of quantum many-body states with limited entanglement; the DMRG algorithm is a tensor network optimization that scales polynomially for 1D gapped systems — connecting quantum information theory ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-tensor-networks-x-quantum-states"
      ],
      "communication_gap": "DMRG was invented by White (1992) as a numerical renormalisation group technique in condensed matter physics; matrix product states were developed by Fannes, Nachtergaele & Werner (1992) in quantum information theory. The equivalence of DMRG and MPS was recognised only around 2004-2007 (Verstraete & Cirac), enabling cross-fertilisation that produced the tensor network method unifying quantum information and condensed matter simulation.",
      "translation_table": [
        {
          "field_a_term": "matrix product state (MPS) for 1D quantum system",
          "field_b_term": "low-rank tensor decomposition of the quantum state amplitude array",
          "note": "MPS bond dimension χ bounds entanglement entropy S ≤ log₂(χ); efficient iff S = O(1)"
        },
        {
          "field_a_term": "entanglement entropy S = -Tr[ρ_A log ρ_A] of subsystem A",
          "field_b_term": "Schmidt rank (number of nonzero singular values in bipartition)",
          "note": "Area law S = O(1) for gapped 1D systems ↔ efficient MPS approximation"
        },
        {
          "field_a_term": "DMRG (density matrix renormalization group) algorithm",
          "field_b_term": "alternating least squares on MPS tensor network (numerical optimisation)",
          "note": "DMRG is a tensor network optimization; White's 1992 DMRG is equivalent to MPS variational compression"
        },
        {
          "field_a_term": "PEPS (projected entangled pair states) for 2D systems",
          "field_b_term": "2D tensor network with contraction complexity NP-hard in general",
          "note": "PEPS contraction is #P-hard; area law holds in 2D but MPS fails — PEPS is the natural extension"
        }
      ],
      "references": [
        {
          "doi": "10.1103/RevModPhys.77.259",
          "note": "Vidal (2007) — entanglement renormalisation; PRL 99:220405 — MERA tensor network"
        },
        {
          "doi": "10.1103/PhysRevLett.69.2863",
          "note": "White (1992) — density matrix formulation for quantum renormalization groups; PRL 69:2863"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/cs-physics/b-tensor-networks-x-quantum-states.yaml"
    },
    {
      "id": "b-epidemiological-demographic-transition",
      "title": "The epidemiological transition (shift from infectious to chronic disease dominance) is mathematically coupled to the demographic transition (falling mortality then fertility) through age-structured SIR dynamics, where declining infectious mortality reshapes the age pyramid and redirects mortality burden toward non-communicable disease",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Omran's epidemiological transition and Notestein's demographic transition are unified by age-structured epidemiological models: controlling infectious diseases lowers under-5 mortality (dP_young/dt term), concentrates survival to older cohorts (shifting age pyramid), and increases the proportion of ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Public health researchers focus on disease control interventions while demographers study population structure and fertility; the mathematical coupling through age-structured survival functions is formalized in health transition theory but rarely taught across both disciplines simultaneously.",
      "translation_table": [
        {
          "field_a_term": "epidemiological transition stage (public health)",
          "field_b_term": "change in age-specific force of infection and mortality schedule μ(a) (demography)",
          "note": "Each Omran stage corresponds to a specific shift in μ(a): Stage 1 high μ at all ages, Stage 3 μ shifted to old age"
        },
        {
          "field_a_term": "under-5 mortality rate decline (public health)",
          "field_b_term": "compression of child mortality in the survival function S(a) (demography)",
          "note": "Reducing infant mortality is equivalent to stretching S(a) in the 0-5 age interval"
        },
        {
          "field_a_term": "disease burden shift from infectious to non-communicable (public health)",
          "field_b_term": "redistribution of lost-life-years from young to old cohorts (demography)",
          "note": "DALY calculations capture this mathematically as change in age-weighted mortality profile"
        },
        {
          "field_a_term": "fertility decline following mortality decline (demography)",
          "field_b_term": "behavioral response to increased child survival probability (public health)",
          "note": "The demographic transition delay quantifies the feedback between survival expectations and fertility decisions"
        }
      ],
      "references": [
        {
          "doi": "10.2307/2172988",
          "note": "Omran (1971) - original epidemiological transition theory"
        },
        {
          "doi": "10.1093/oxfordjournals.aje.a112398",
          "note": "Frenk et al. (1991) - polarized model of the health transition"
        },
        {
          "doi": "10.1016/S0140-6736(12)61728-0",
          "note": "Lozano et al. (2012) GBD - global burden of disease and epidemiological transition"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/demography-public-health/b-epidemiological-demographic-transition.yaml"
    },
    {
      "id": "b-embryonic-axis-formation-wnt-bmp-bistability",
      "title": "Embryonic body-axis formation is controlled by opposing Wnt and BMP morphogen gradients that create a bistable switch, mapping developmental patterning onto the mathematics of reaction-diffusion systems and bifurcation theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "During vertebrate gastrulation, Wnt (posterior) and BMP (ventral) morphogen gradients interact with their inhibitors (Dickkopf, Noggin/Chordin) to form a double-negative feedback loop that is bistable: cells commit to either anterior/dorsal or posterior/ventral fates. This is mathematically a cusp b",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-wnt-bmp-bistable-axis-specification"
      ],
      "communication_gap": "Developmental biologists studying Wnt/BMP signaling and applied mathematicians studying bistable reaction-diffusion systems rarely co-publish; the theoretical morphogenesis community (Murray, Maini) has built models but experimental biologists often treat them as illustrative rather than quantitatively predictive.\n",
      "translation_table": [
        {
          "field_a_term": "Wnt/β-catenin signaling gradient (developmental biology)",
          "field_b_term": "activator concentration in activator-inhibitor model (mathematics)",
          "note": "Wnt activates target genes and its own inhibitor Dickkopf — the autocatalytic loop"
        },
        {
          "field_a_term": "anterior-posterior axis commitment (developmental biology)",
          "field_b_term": "bistable switch / saddle-node bifurcation (mathematics)",
          "note": "Cells snap between two stable states as morphogen concentration crosses a threshold"
        },
        {
          "field_a_term": "somite segmentation clock (developmental biology)",
          "field_b_term": "oscillatory reaction-diffusion pattern (mathematics)",
          "note": "Wnt/Notch clock generates a traveling wave that is arrested by FGF gradient"
        },
        {
          "field_a_term": "morphogen threshold interpretation (developmental biology)",
          "field_b_term": "bifurcation parameter value (mathematics)",
          "note": "Cells read gradient position by comparing activator/inhibitor ratio to a threshold"
        }
      ],
      "references": [
        {
          "doi": "10.1016/j.cell.2006.07.024",
          "note": "Piccolo et al. (2006) — Wnt-BMP antagonism and dorsoventral axis in vertebrates"
        },
        {
          "doi": "10.1126/science.1154120",
          "note": "Aulehla et al. (2008) — Wnt3a gradient and the segmentation clock"
        },
        {
          "doi": "10.1371/journal.pbio.1001139",
          "note": "Ben-Zvi et al. (2011) — scaling of the BMP gradient in the Xenopus embryo"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/developmental-biology-mathematics/b-embryonic-axis-formation-wnt-bmp-bistability.yaml"
    },
    {
      "id": "b-regenerative-medicine-morphogenetic-fields",
      "title": "Regenerative medicine can harness morphogenetic field theory from developmental biology: the bioelectric and biochemical long-range signalling fields that guide embryonic patterning operate continuously in adult tissues and can be pharmacologically re-activated to instruct stem cells to reconstruct complex anatomical structures, providing a field-theoretic design language for regenerative therapies",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Morphogenetic fields, as formalized by Turing reaction-diffusion equations and bioelectric gradients (voltage-gated ion channel networks setting resting membrane potential), encode positional information used by cells during regeneration; in planaria, xenopus, and axolotl, disrupting or overriding t",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-bioelectric-pattern-regeneration-control"
      ],
      "communication_gap": "Developmental biologists studying morphogenetic fields work in model organisms with intact developmental programs while regenerative medicine clinicians focus on cell delivery and scaffold design; the bioelectric field-programming approach is championed by Levin's lab but is not yet integrated into mainstream stem cell medicine protocols.",
      "translation_table": [
        {
          "field_a_term": "embryonic morphogen gradient (Bicoid, Wnt, BMP) (developmental biology)",
          "field_b_term": "tissue bioelectric potential gradient Vmem(x) (medicine)",
          "note": "Both encode positional information; bioelectric gradients operate faster and at longer range than diffusible morphogens"
        },
        {
          "field_a_term": "reaction-diffusion instability generating organ pattern (developmental biology)",
          "field_b_term": "ion channel network generating tissue-scale Vmem pattern (medicine)",
          "note": "Vmem patterns exhibit Turing-like spatial wavelengths that set organ boundaries during regeneration"
        },
        {
          "field_a_term": "Waddington attractor basin for cell fate (developmental biology)",
          "field_b_term": "bioelectric attractor for tissue-level anatomy (medicine)",
          "note": "Morphogenetic fields define anatomical attractors; ion channel drugs can switch tissue toward different attractor states"
        },
        {
          "field_a_term": "planarian head-tail organizer (developmental biology)",
          "field_b_term": "bioelectric polarity axis in regenerating tissue (medicine)",
          "note": "Gap-junction permeability and Vmem polarity set head vs. tail identity in planaria, demonstrating field control of body plan"
        }
      ],
      "references": [
        {
          "doi": "10.1111/j.1469-7580.2011.01410.x",
          "note": "Levin (2011) - the wisdom of the body: future techniques and approaches to morphogenetic fields"
        },
        {
          "doi": "10.1126/science.1234097",
          "note": "Pezzulo & Levin (2016) - re-membering the body: applications of computational neuroscience to the top-down control of regeneration of limbs and other complex organs"
        },
        {
          "doi": "10.1038/s41586-023-06019-6",
          "note": "Tung & Bhanu (2023) - bioelectric signalling in regeneration (recent review)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/developmental-biology-medicine/b-regenerative-medicine-morphogenetic-fields.yaml"
    },
    {
      "id": "b-gene-networks-waddington-landscape",
      "title": "Developmental gene regulatory networks are dynamical systems whose stable attractors correspond to cell fates, mathematically representing Waddington's epigenetic landscape: each cell type is an attractor of the gene-expression vector field dX/dt = F(X), canalization corresponds to attractor basin depth, and transdifferentiation corresponds to noise-driven transitions between basins",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Waddington epigenetic landscape is made mathematically rigorous by gene regulatory network (GRN) dynamics: the GRN defines a vector field dX/dt = F(X) in gene-expression space ℝ^n, where stable fixed points are cell-type attractors; the landscape V(X) satisfying F(X) = -∇V(X) is the potential su",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Developmental biologists characterize GRNs experimentally through gene knockouts and expression profiling while dynamical systems mathematicians analyze vector fields and bifurcations; the Waddington landscape metaphor is widely used in cell biology but quantitative identification of attractors and basin boundaries from real GRN data remains methodologically underdeveloped.",
      "translation_table": [
        {
          "field_a_term": "cell fate / cell type (developmental biology)",
          "field_b_term": "stable fixed point (attractor) of the GRN ODE system (dynamical systems)",
          "note": "Each cell type corresponds to a stable equilibrium X* where F(X*) = 0 and all eigenvalues of DF(X*) have negative real parts"
        },
        {
          "field_a_term": "Waddington landscape valley depth (developmental biology)",
          "field_b_term": "potential well depth / attractor basin volume (dynamical systems)",
          "note": "Deeper valleys = more stable cell fates; basin depth determines resistance to transdifferentiation"
        },
        {
          "field_a_term": "developmental canalization (developmental biology)",
          "field_b_term": "structural stability of the attractor under parameter perturbation (dynamical systems)",
          "note": "Canalization: cell fate maintained despite genetic or environmental noise; = insensitivity of attractor to F(X) perturbations"
        },
        {
          "field_a_term": "cell reprogramming / transdifferentiation (developmental biology)",
          "field_b_term": "noise-driven transition between attractor basins (dynamical systems)",
          "note": "Reprogramming by Yamanaka factors corresponds to adding terms to F(X) that shift the landscape and create new attractors"
        }
      ],
      "references": [
        {
          "doi": "10.1242/dev.002626",
          "note": "Waddington (1957) - Strategy of the Genes: original epigenetic landscape metaphor"
        },
        {
          "doi": "10.1016/j.cell.2012.09.009",
          "note": "Bhattacharya et al. (2011) - attractor landscape analysis of cell fate decisions"
        },
        {
          "doi": "10.1073/pnas.1014827108",
          "note": "Wang et al. (2011) - quantifying Waddington landscape and biological paths for cell fate decisions"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/developmental-biology-physics/b-gene-networks-waddington-landscape.yaml"
    },
    {
      "id": "b-morphogen-gradients-diffusion",
      "title": "Turing's reaction-diffusion mechanism generates biological spatial patterns from two morphogens — an activator (short-range positive feedback) and an inhibitor (long-range negative feedback) — with pattern wavelength λ ∝ √(D/k) predicted exactly from diffusion and kinetic constants.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Alan Turing's 1952 paper \"The Chemical Basis of Morphogenesis\" showed that a homogeneous mixture of two interacting chemical species — an activator A and an inhibitor I — becomes spontaneously patterned when their diffusion rates differ sufficiently (D_I >> D_A). This reaction-diffusion (RD) instabi",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-turing-pattern-wavelength-experimental-test"
      ],
      "communication_gap": "Turing's 1952 paper was not read by developmental biologists for two decades; the field was dominated by Wolpert's French Flag model (1969) which requires pre-specified gradient sources. The Turing mechanism was treated as an interesting alternative without experimental evidence until the 1990s-2000s when fluorescent protein tools made it possible to measure morphogen dynamics in living embryos. Even now, many developmental biologists are skeptical of RD mechanisms because the measurement of both diffusion constants and kinetic rates in the same system is technically demanding.\n",
      "translation_table": [
        {
          "field_a_term": "activator morphogen A (e.g., Wnt, BMP, Nodal)",
          "field_b_term": "activator species u in Turing RD system (local positive feedback)"
        },
        {
          "field_a_term": "inhibitor morphogen I (e.g., Dkk, Noggin, Lefty)",
          "field_b_term": "inhibitor species v in Turing RD system (long-range negative feedback)"
        },
        {
          "field_a_term": "diffusion-driven instability (Turing instability)",
          "field_b_term": "linear instability of homogeneous steady state when D_I/D_A > threshold"
        },
        {
          "field_a_term": "pattern wavelength (hair follicle spacing, digit spacing)",
          "field_b_term": "most-unstable wavenumber k* = (k/D_A)^{1/2}"
        },
        {
          "field_a_term": "morphogen gradient (concentration vs. position)",
          "field_b_term": "steady-state diffusion profile C(x) = C₀·exp(-x/√(D/k))"
        },
        {
          "field_a_term": "threshold concentration for cell fate decision",
          "field_b_term": "level set of morphogen concentration field (French Flag boundaries)"
        },
        {
          "field_a_term": "gradient noise / cell-to-cell variability",
          "field_b_term": "fluctuations in diffusion equation at finite particle number"
        },
        {
          "field_a_term": "embryo scaling (pattern scales with organism size)",
          "field_b_term": "scale-invariant gradient (gradient length ∝ tissue length)"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rstb.1952.0012",
          "note": "Turing (1952) Phil Trans R Soc B 237:37 — original reaction-diffusion patterning paper"
        },
        {
          "doi": "10.1016/0022-5193(69)90014-3",
          "note": "Wolpert (1969) J Theor Biol 25:1 — positional information and the French Flag model"
        },
        {
          "note": "Meinhardt (1982) Models of Biological Pattern Formation. Academic Press, London."
        },
        {
          "doi": "10.1126/science.1225394",
          "note": "Müller et al. (2012) Science 336:721 — quantitative test of Nodal/Lefty Turing system in zebrafish"
        },
        {
          "doi": "10.1016/j.cell.2014.11.018",
          "note": "Raspopovic et al. (2014) Science — Turing mechanism for digit patterning confirmed"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/developmental-biology-physics/b-morphogen-gradients-diffusion.yaml"
    },
    {
      "id": "b-topological-defects-tissue-morphogenesis",
      "title": "Topological defects in active nematic liquid crystals drive cell extrusion and tissue morphogenesis: +1/2 charge defects in cellular monolayers generate extensile flows that accumulate cells and trigger apoptotic extrusion, while -1/2 defects create contractile flows that deplete cells, providing a physics-first explanation of tissue patterning and organ shape emergence\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Confluent epithelial cell monolayers behave as active nematic liquid crystals in which cell elongation axes constitute the nematic director field; topological defects with winding number +1/2 generate self-propelling extensile active flows that concentrate mechanical stress, trigger local cell accum",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Liquid crystal physicists and developmental biologists rarely attend the same conferences; the active nematic framework was developed by soft matter physicists studying synthetic liquid crystals and was only translated to living tissues after 2010. Most cell biologists are unfamiliar with the mathematical formalism of topological defects (homotopy groups, winding numbers), while physicists rarely have access to live-cell monolayer imaging equipment and biological expertise needed to validate the predictions.\n",
      "translation_table": [
        {
          "field_a_term": "topological defect with charge +1/2 in nematic liquid crystal (physics)",
          "field_b_term": "cell accumulation site and extrusion hotspot in epithelial monolayer (developmental biology)",
          "note": "+1/2 defects self-propel due to active stress asymmetry; in living tissues they concentrate cells until compressive stress triggers apoptotic or live-cell extrusion, controlling cell number homeostasis"
        },
        {
          "field_a_term": "topological defect with charge -1/2 in nematic liquid crystal (physics)",
          "field_b_term": "cell depletion zone and local thinning region in epithelial sheet (developmental biology)",
          "note": "-1/2 defects are passive in extensile nematics; they act as sinks that draw cells away from the defect core, creating holes or thin regions in developing epithelia"
        },
        {
          "field_a_term": "nematic director field n(x) (physics)",
          "field_b_term": "cell elongation axis orientation field across tissue (developmental biology)",
          "note": "The coarse-grained orientation of cell long axes defines a two-dimensional nematic director field measurable by fluorescence microscopy"
        },
        {
          "field_a_term": "active extensile stress αQ (physics)",
          "field_b_term": "actomyosin contractility anisotropy in cell cortex (developmental biology)",
          "note": "Active nematic parameter α > 0 for extensile systems corresponds to stress fiber-driven elongation along the director; α < 0 (contractile) gives different defect dynamics"
        },
        {
          "field_a_term": "topological charge conservation and defect annihilation (physics)",
          "field_b_term": "coordinated tissue remodeling and wound closure (developmental biology)",
          "note": "Defect pair annihilation (+1/2 and -1/2) corresponds to healing events in tissue where locally disrupted cell orientations relax back to uniform alignment"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature14295",
          "note": "Saw et al. (2017) Nature — Topological defects in epithelia govern cell death and extrusion; first direct experimental demonstration in MDCK monolayers"
        },
        {
          "doi": "10.1038/nphys3312",
          "note": "Marchetti et al. (2013) — Hydrodynamics of soft active matter; foundational theory of active nematics and topological defects"
        },
        {
          "doi": "10.1073/pnas.1415851112",
          "note": "Kawaguchi et al. (2017) — Topological defects control collective dynamics in neural progenitor cell cultures"
        },
        {
          "doi": "10.1126/science.aar5196",
          "note": "Duclos et al. (2017) — Topological defects in confined populations of spindle-shaped cells; active nematic dynamics in myoblasts"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/developmental-biology-physics/b-topological-defects-tissue-morphogenesis.yaml"
    },
    {
      "id": "b-delay-embedding-x-icu-deterioration-early-warning",
      "title": "Delay-embedding reconstructions can transfer from nonlinear dynamics to ICU deterioration early-warning indicators.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Delay-embedding reconstructions can transfer from nonlinear dynamics to ICU deterioration early-warning indicators.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-delay-embedding-indicators-improve-icu-deterioration-lead-time"
      ],
      "communication_gap": "Domain-specific vocabularies and benchmark conventions obscure transferable mathematical structure.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1023/A:1008925309027",
          "note": "Stochastic spiking dynamics reference for state-space reconstruction intuition."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/dynamical-systems-critical-care/b-delay-embedding-x-icu-deterioration-early-warning.yaml"
    },
    {
      "id": "b-lstm-sequence-memory-x-icu-physiology-forecasting",
      "title": "Long short-term memory dynamics connect sequence-learning memory gates with ICU physiology forecasting.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: LSTM gating provides a sequence-memory abstraction that can capture delayed physiological interactions in ICU time-series forecasting.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-missingness-augmented-lstm-models-improve-icu-decompensation-horizon-accuracy"
      ],
      "communication_gap": "ML sequence models optimize aggregate prediction metrics, whereas critical-care deployment requires transparent handling of irregular sampling and missingness.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1162/neco.1997.9.8.1735",
          "note": "Foundational LSTM architecture."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/dynamical-systems-critical-care/b-lstm-sequence-memory-x-icu-physiology-forecasting.yaml"
    },
    {
      "id": "b-agricultural-biodiversity-ecosystem",
      "title": "Agricultural intensification reduces local biodiversity and ecosystem service delivery through a quantifiable biodiversity-ecosystem function relationship, informing the land-sparing versus land-sharing trade-off",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Ecosystem service provision (pollination, pest control, nutrient cycling) scales as a saturating function of species richness S with half-saturation at S1/2 ~ 5-10 species, so intensification-driven local extinction reduces services at a rate determined by the slope of the BEF relationship near S_ag",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Agronomists optimize yield and profitability while ecologists study biodiversity; the quantitative BEF framework connecting species loss to ecosystem service reduction is known in ecology but rarely integrated into agricultural policy or farm management decisions.",
      "translation_table": [
        {
          "field_a_term": "species richness S in agricultural landscape",
          "field_b_term": "ecosystem service delivery Y(S) = Y_max * S / (S1/2 + S)",
          "note": "Michaelis-Menten BEF relationship: linear at low S, saturating at high S; slope at S_agricultural determines sensitivity to biodiversity loss"
        },
        {
          "field_a_term": "land-sparing strategy (high-yield, wildlife-free farmland)",
          "field_b_term": "high Y_max at low biodiversity; biodiversity concentrated in reserves",
          "note": "Land-sparing minimizes farmland area at cost of homogenizing agricultural matrix"
        },
        {
          "field_a_term": "land-sharing strategy (wildlife-friendly low-yield farming)",
          "field_b_term": "lower Y_max but higher S throughout farmland",
          "note": "Land-sharing trades yield for biodiversity; optimal when BEF slope is steep at S_agricultural"
        },
        {
          "field_a_term": "complementarity effect in mixed-species agricultural plots",
          "field_b_term": "transgressive overyielding: polyculture yield > best monoculture yield",
          "note": "Niche complementarity in intercropping systems mirrors BEF complementarity in natural ecosystems"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1178168",
          "note": "Phalan et al. (2011) Science - land-sparing vs land-sharing comparison across bird and tree diversity"
        },
        {
          "doi": "10.1126/science.1060391",
          "note": "Tilman et al. (2001) Science - diversity and productivity in grassland ecosystem functioning"
        },
        {
          "doi": "10.1038/nature04150",
          "note": "Balvanera et al. (2006) - quantifying the evidence for biodiversity effects on ecosystem services"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-biology/b-agricultural-biodiversity-ecosystem.yaml"
    },
    {
      "id": "b-coevolution-arms-races",
      "title": "Coevolution between interacting species drives reciprocal evolutionary arms races — the Red Queen hypothesis (Van Valen 1973) — whose dynamics are quantitatively described by the community interaction matrix and eigenvalue analysis, unifying evolutionary biology and ecological stability theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Coevolution is reciprocal evolutionary change in interacting species. The Red Queen hypothesis (Van Valen 1973): species must continually evolve just to maintain fitness relative to coevolving partners — constant selection pressure with no stable equilibrium. Named for the Red Queen in Through the L",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-geographic-mosaic-coevolution-trait-variance"
      ],
      "communication_gap": "Evolutionary ecology journals (Evolution, American Naturalist) and molecular biology journals rarely cross-cite on this topic. The mathematical community-matrix framework for stability is known to theoretical ecologists but rarely applied to coevolutionary dynamics. Van Valen's original paper was rejected by major journals and published in his own journal (Evolutionary Theory) — indicating disciplinary resistance to the idea.\n",
      "translation_table": [
        {
          "field_a_term": "Red Queen (constant coevolution, no equilibrium)",
          "field_b_term": "eigenvalues of J with nonzero imaginary part — oscillatory dynamics",
          "note": "Persistent oscillation in trait space rather than convergence to equilibrium"
        },
        {
          "field_a_term": "TTX resistance level in garter snakes",
          "field_b_term": "quantitative trait matching coevolving partner trait",
          "note": "Resistance tracks local newt TTX production level — geographic mosaic prediction confirmed"
        },
        {
          "field_a_term": "geographic hot spot / cold spot",
          "field_b_term": "spatially variable selection coefficient s",
          "note": "s > 0 at hot spots drives reciprocal change; s ≈ 0 at cold spots does not"
        },
        {
          "field_a_term": "life-dinner principle (asymmetric selection)",
          "field_b_term": "asymmetric fitness landscape — faster molecular evolution in prey proteins",
          "note": "Substitution rate d_N/d_S elevated in prey receptor genes vs. predator toxin genes"
        },
        {
          "field_a_term": "community interaction matrix J_{ij}",
          "field_b_term": "coevolutionary dynamics as flow on the J matrix landscape",
          "note": "Eigen-analysis of J predicts whether coevolution leads to stability or Red Queen cycling"
        }
      ],
      "references": [
        {
          "note": "Van Valen (1973) Evol Theory 1:1 — Red Queen hypothesis"
        },
        {
          "doi": "10.1111/j.0014-3820.2002.tb01373.x",
          "note": "Brodie et al. (2002) Evolution 56:2067 — TTX resistance geographic coevolution"
        },
        {
          "note": "Thompson (2005) The Geographic Mosaic of Coevolution. University of Chicago Press"
        },
        {
          "doi": "10.1098/rspb.1979.0081",
          "note": "Dawkins & Krebs (1979) Proc R Soc B 205:489 — arms races and the life-dinner principle"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-biology/b-coevolution-arms-races.yaml"
    },
    {
      "id": "b-holobiont-microbiome-coevolution",
      "title": "Holobiont Theory and Host-Microbiome Coevolution — the hologenome as a unit of selection integrates host genetics with vertically and horizontally transmitted microbial communities",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The holobiont concept (Margulis 1991; Zilber-Rosenberg & Rosenberg 2008) proposes that a host and its associated microbiome function as a single biological unit. The hologenome theory extends this to evolutionary timescales: natural selection acts on the hologenome — the combined genetic information",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Marine ecologists who study coral holobionts and human gut microbiome researchers rarely attend the same conferences or read each other's literature, despite studying formally equivalent systems. Evolutionary biologists are divided on whether the hologenome theory constitutes a genuine unit-of-selection extension of evolutionary theory or merely a useful metaphor. This debate has slowed integration with ecological and evolutionary microbiology.\n",
      "translation_table": [
        {
          "field_a_term": "hologenome (host + microbiome collective genome)",
          "field_b_term": "unit of selection encompassing host and its associated microorganisms",
          "note": "Hologenome theory is debated; key prediction is that microbiome variation is heritable and selectable"
        },
        {
          "field_a_term": "vertical microbiome transmission (maternal seeding)",
          "field_b_term": "Lamarckian-like inheritance of acquired microbial communities",
          "note": "C-section birth disrupts vaginal microbiome seeding → altered neonatal immune development"
        },
        {
          "field_a_term": "coral–Symbiodinium photosymbiosis",
          "field_b_term": "nutritional holobiont: endosymbiont provides up to 90% of coral carbon via photosynthesis",
          "note": "Symbiodinium clade switching ('symbiont shuffling') can confer thermotolerance on decadal timescales"
        },
        {
          "field_a_term": "germ-free mouse experiment with humanised microbiota",
          "field_b_term": "causal test of microbiome contribution to host metabolic phenotype",
          "note": "Obese-donor microbiota transmit obese phenotype; this is the gold-standard causal evidence for holobiont effects"
        },
        {
          "field_a_term": "breast milk human milk oligosaccharides (HMOs)",
          "field_b_term": "selective prebiotic niche construction for Bifidobacterium in neonatal gut",
          "note": "HMOs are non-digestible by infant but feed B. longum infantis → immune education and pathogen displacement"
        },
        {
          "field_a_term": "microbiome heritability (h² ≈ 0.5)",
          "field_b_term": "genetic contribution to gut microbiome composition",
          "note": "GWAS of microbiome composition identifies host loci (LCT, FUT2); but diet explains more variance than host genotype"
        }
      ],
      "references": [
        {
          "doi": "10.1111/j.1574-6976.2008.00123.x",
          "note": "Zilber-Rosenberg & Rosenberg (2008) FEMS Microbiol Rev 32:723 — hologenome theory"
        },
        {
          "doi": "10.1126/science.1218190",
          "note": "McFall-Ngai et al. (2013) Science 336:1363 — animals in a bacterial world"
        },
        {
          "doi": "10.1038/nature07540",
          "note": "Turnbaugh et al. (2009) Nature 457:480 — human gut microbiome"
        },
        {
          "doi": "10.1371/journal.pbio.1002226",
          "note": "Bordenstein & Theis (2015) PLoS Biol 13:e1002226 — hologenome theory revisited"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-biology/b-holobiont-microbiome-coevolution.yaml"
    },
    {
      "id": "b-microbiome-ecology-host-health",
      "title": "The human gut microbiome is a complex ecological community of ~10¹³ microorganisms governed by ecological diversity metrics (Shannon entropy, Bray-Curtis dissimilarity) and keystone-species dynamics — and its ecological state directly determines host metabolic, immunological, and neurological health via the gut-brain axis.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Ecology developed quantitative diversity metrics — Shannon entropy H = -Σpᵢ log pᵢ for α-diversity and Bray-Curtis dissimilarity for β-diversity — to characterize community composition, and identified keystone species whose disproportionate influence on community stability makes their loss catastrop",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-microbiome-diversity-host-resilience"
      ],
      "communication_gap": "Ecologists rarely read gastroenterology journals; gastroenterologists rarely apply quantitative ecology metrics beyond 16S rRNA sequencing diversity scores. The mathematical toolkit of community ecology (neutral theory, island biogeography, species abundance distributions) has only slowly penetrated microbiome research. Conversely, microbiome biologists have access to culturomics and metatranscriptomics data at scales ecologists have never had — data that could test long-standing ecological theories with unprecedented resolution.\n",
      "translation_table": [
        {
          "field_a_term": "Shannon entropy H (α-diversity)",
          "field_b_term": "gut microbiome diversity (clinical biomarker)",
          "note": "Low H predicts disease susceptibility; FMT increases H and restores health"
        },
        {
          "field_a_term": "Bray-Curtis dissimilarity (β-diversity)",
          "field_b_term": "enterotype clustering (Firmicutes vs. Bacteroidetes dominance)",
          "note": "Human gut microbiomes cluster into enterotypes analogous to ecological biomes"
        },
        {
          "field_a_term": "keystone species",
          "field_b_term": "keystone gut taxa (Akkermansia, F. prausnitzii)",
          "note": "Removal of keystone gut taxa causes community collapse and metabolic disease"
        },
        {
          "field_a_term": "ecological succession",
          "field_b_term": "microbiome colonization and FMT engraftment",
          "note": "FMT reconstitutes the gut community via an accelerated succession process"
        },
        {
          "field_a_term": "trophic cascade",
          "field_b_term": "gut-brain axis (SCFA → enteric NS → vagus → CNS)",
          "note": "SCFAs produced by gut bacteria are the metabolic link transmitting microbiome state to the brain"
        },
        {
          "field_a_term": "competitive exclusion (Gause's law)",
          "field_b_term": "C. difficile colonization resistance",
          "note": "A diverse resident community provides colonization resistance against pathogens via competitive exclusion"
        }
      ],
      "references": [
        {
          "note": "Turnbaugh et al. (2007) — human gut microbiome and obesity",
          "doi": "10.1038/nature06244"
        },
        {
          "note": "Qin et al. (2010) — human gut microbial gene catalogue",
          "doi": "10.1038/nature08821"
        },
        {
          "note": "Cryan et al. (2019) — the microbiota-gut-brain axis comprehensive review",
          "doi": "10.1152/physrev.00018.2018"
        },
        {
          "note": "van Nood et al. (2013) — FMT for recurrent C. difficile",
          "doi": "10.1056/NEJMoa1205037"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-biology/b-microbiome-ecology-host-health.yaml"
    },
    {
      "id": "b-allelopathy-chemical-ecology",
      "title": "Allelopathy — plant chemical warfare via secondary metabolites — is the ecological instantiation of the same coevolutionary arms race chemistry that drives herbivore detoxification enzyme diversification, and plant VOC emissions create regional aerosol-climate feedbacks connecting chemical ecology to atmospheric physics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Allelopathy is the release of phytochemicals (allelochemicals) by plants that inhibit the germination, growth, or survival of neighbouring plants. Juglone (5-hydroxy-1,4-naphthoquinone) from black walnut (Juglans nigra) inhibits respiratory chain complex I and plant mitochondrial electron transport ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-allelopathy-glucosinolate-diversity-coevolution-ratchet"
      ],
      "communication_gap": "Allelopathy research sits at the boundary of plant physiology, soil science, and natural products chemistry — three communities that rarely attend the same conferences. The atmospheric chemistry connection (VOC → SOA → cloud formation) is studied by atmospheric scientists who have no contact with plant ecologists. The Ehrlich-Raven coevolution framework is known to evolutionary biologists but not to agrochemists who study the same compounds as herbicide leads.\n",
      "translation_table": [
        {
          "field_a_term": "complex I inhibitor (respiratory chain)",
          "field_b_term": "juglone (allelopathic quinone from Juglans nigra)",
          "note": "Juglone's mechanism as respiratory inhibitor explains its broad phytotoxicity"
        },
        {
          "field_a_term": "coevolutionary arms race (Red Queen)",
          "field_b_term": "Ehrlich-Raven diversification — plant glucosinolates ↔ butterfly detoxification enzymes",
          "note": "Both produce runaway diversification; the biochemical ratchet generates > 100 glucosinolate structures in Brassicaceae"
        },
        {
          "field_a_term": "mycorrhizal network disruption",
          "field_b_term": "garlic mustard isothiocyanate attacking native forest AMF networks",
          "note": "Invasive success partly explained by undermining native plant mutualistic infrastructure"
        },
        {
          "field_a_term": "secondary organic aerosol (SOA) formation",
          "field_b_term": "biogenic isoprene and monoterpene oxidation → cloud condensation nuclei",
          "note": "Boreal forest monoterpene emissions create a negative climate feedback (more SOA → more clouds → less solar heating)"
        },
        {
          "field_a_term": "metabolomics fingerprint",
          "field_b_term": "allelochemical profile of soil and plant tissue",
          "note": "LC-MS metabolomics can map allelopathic compounds in rhizosphere soil and identify active fractions"
        }
      ],
      "references": [
        {
          "note": "Rice, E.L. (1984) Allelopathy, 2nd ed. Academic Press."
        },
        {
          "doi": "10.1111/j.1558-5646.1964.tb01674.x",
          "note": "Ehrlich & Raven (1964) Evolution 18:586 — butterflies and plants; coevolution and secondary metabolite diversification"
        },
        {
          "doi": "10.1890/1540-9295(2004)002[0436:NAEWPF]2.0.CO;2",
          "note": "Callaway & Ridenour (2004) Front Ecol Environ 2:436 — novel weapons hypothesis; garlic mustard allelopathy"
        },
        {
          "doi": "10.1023/A:1006127516791",
          "note": "Kesselmeier & Staudt (1999) J Atmos Chem 33:23 — biogenic volatile organic compounds from vegetation; global estimates"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-chemistry/b-allelopathy-chemical-ecology.yaml"
    },
    {
      "id": "b-biogeochemical-cycles-thermodynamic-disequilibrium",
      "title": "Life maintains Earth's atmosphere in extreme thermodynamic disequilibrium — the simultaneous presence of O₂ and CH₄ is a detectable biosignature — connecting ecology (biosphere activity) to atmospheric chemistry through Prigogine's dissipative structure theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Thermodynamic equilibrium of Earth's atmosphere (if life were absent) would yield a CO₂-dominated atmosphere similar to Mars or Venus, with negligible O₂ and CH₄. The simultaneous presence of O₂ (21%) and CH₄ (~1.8 ppm) represents a chemical disequilibrium: these gases react spontaneously (CH₄ + 2O₂",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-atmospheric-disequilibrium-biosignature-exoplanets"
      ],
      "communication_gap": "Lovelock trained as a chemist-inventor; ecologists regarded the Gaia hypothesis as teleological and resisted it. Atmospheric chemists and climate scientists engaged with the chemistry but not the thermodynamic framing. Prigogine's non-equilibrium thermodynamics was developed in the physics and chemistry communities and rarely cited in ecology or astrobiology literature. Krissansen-Totton et al. (2018) provided the first rigorous thermodynamic calculation in a planetary science context.\n",
      "translation_table": [
        {
          "field_a_term": "dissipative structure (Prigogine, far-from-equilibrium ordered system)",
          "field_b_term": "biosphere-atmosphere coupled system maintained by solar flux"
        },
        {
          "field_a_term": "Gibbs free energy of atmospheric disequilibrium ΔG_atm",
          "field_b_term": "quantitative biosignature strength (distinguishes inhabited from uninhabited planets)"
        },
        {
          "field_a_term": "atmospheric equilibrium composition (thermodynamic minimum)",
          "field_b_term": "abiotic reference state (Mars/Venus-like CO₂ atmosphere)"
        },
        {
          "field_a_term": "O₂–CH₄ chemical disequilibrium pair",
          "field_b_term": "primary atmospheric biosignature detectable by remote spectroscopy"
        },
        {
          "field_a_term": "biogeochemical cycling rate (e.g., GPP, methanogenesis rate)",
          "field_b_term": "disequilibrium maintenance rate — quantifies ecosystem contribution"
        },
        {
          "field_a_term": "entropy production rate of the biosphere",
          "field_b_term": "metabolic dissipation rate — maximum entropy production principle"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0004-6981(72)90071-4",
          "note": "Lovelock (1972) Gaia as seen through the atmosphere. Atmos Environ 6:579"
        },
        {
          "doi": "10.1111/j.2153-3490.1974.tb01946.x",
          "note": "Lovelock & Margulis (1974) Atmospheric homeostasis by and for the biosphere. Tellus 26:2"
        },
        {
          "doi": "10.1126/sciadv.aao5747",
          "note": "Krissansen-Totton et al. (2018) Disequilibrium biosignatures over Earth history. Sci Adv 4:eaao5747"
        },
        {
          "doi": "10.1126/science.201.4358.777",
          "note": "Prigogine (1978) Time, structure, and fluctuations. Science 201:777"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-chemistry/b-biogeochemical-cycles-thermodynamic-disequilibrium.yaml"
    },
    {
      "id": "b-ecological-stoichiometry-liebigs-law",
      "title": "Ecological stoichiometry quantifies how the ratios of chemical elements (C:N:P) constrain organism growth and ecosystem processes, with Liebig's law of the minimum from agricultural chemistry providing the foundational principle that growth is limited by the scarcest required nutrient relative to demand.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Liebig's law (1840) states that plant yield is determined by the most limiting nutrient: growth rate μ = μ_max · min(S_N/K_N, S_P/K_P, S_C/K_C) where S_i are nutrient concentrations and K_i are half-saturation constants. Ecological stoichiometry extends this: the elemental ratio of biomass (e.g. Red",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-homeostatic-regulation-vs-flexible-stoichiometry-taxa"
      ],
      "communication_gap": "Agricultural chemists applying Liebig's law to soil fertility and ecologists studying stoichiometric constraints on food webs developed their frameworks largely independently; the explicit mathematical unification via threshold elemental ratios emerged in the 1990s with Sterner and Elser's work.\n",
      "translation_table": [
        {
          "field_a_term": "limiting nutrient (ecology)",
          "field_b_term": "Liebig minimum factor (chemistry)",
          "note": "Same concept; ecology applies it to consumers and multiple trophic levels"
        },
        {
          "field_a_term": "Redfield ratio C:N:P = 106:16:1 (ecology)",
          "field_b_term": "stoichiometric demand ratio (chemistry)",
          "note": "Empirical elemental ratio defining \"balanced\" phytoplankton nutrition"
        },
        {
          "field_a_term": "threshold elemental ratio TER (ecology)",
          "field_b_term": "stoichiometric break-even point (chemistry)",
          "note": "TER predicts the supply ratio where limitation switches between C and mineral nutrients"
        },
        {
          "field_a_term": "nutrient recycling efficiency (ecology)",
          "field_b_term": "mass balance in biogeochemical cycles (chemistry)",
          "note": "Stoichiometric constraints determine how efficiently nutrients are cycled through trophic levels"
        }
      ],
      "references": [
        {
          "doi": "10.1016/S0169-5347(01)02247-X",
          "note": "Sterner & Elser (2002) - ecological stoichiometry foundational textbook reference"
        },
        {
          "doi": "10.1038/nature01208",
          "note": "Sterner & Elser (2002) - stoichiometry and the fate of excess nutrients"
        },
        {
          "doi": "10.1890/0012-9658(2004)085[0393:TEROCE]2.0.CO;2",
          "note": "Frost et al. (2004) - threshold elemental ratios and consumer growth in lakes"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-chemistry/b-ecological-stoichiometry-liebigs-law.yaml"
    },
    {
      "id": "b-peat-bog-autocatalytic-decomposition",
      "title": "Peat bog carbon dynamics exhibit autocatalytic decomposition feedbacks where warming-induced microbial activity accelerates decomposition, releasing CO₂ that further warms the atmosphere — a positive feedback loop modeled by autocatalytic chemical kinetics, with pH buffering by Sphagnum moss acting as the key negative feedback that maintains peat stability under current conditions.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Autocatalytic decomposition follows d[P]/dt = -k·[P]·[E] where [P] = peat substrate and [E] = enzyme/microbial biomass, with [E] itself growing as d[E]/dt = r·[P] - δ·[E] (growth from substrate, decay at rate δ). The system has a saddle-point instability: below a threshold peat substrate concentrati",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-sphagnum-restoration-stabilizes-degraded-peatland-carbon"
      ],
      "communication_gap": "Peat ecologists studying bog carbon balance and chemists studying autocatalytic decomposition kinetics have overlapping frameworks, but quantitative kinetic modeling of bog autocatalytic feedbacks is dominated by ecologists using empirical temperature sensitivities (Q₁₀ factors) rather than mechanistic enzyme-substrate kinetic models from biochemistry; bridging the two would improve climate feedback projections.\n",
      "translation_table": [
        {
          "field_a_term": "peat decomposition positive feedback (ecology)",
          "field_b_term": "autocatalytic reaction / substrate-catalyst co-amplification (chemistry)",
          "note": "Microbial biomass produced by decomposition catalyzes further decomposition — autocatalysis"
        },
        {
          "field_a_term": "Sphagnum moss acidification (ecology)",
          "field_b_term": "pH-dependent enzyme inhibition / negative feedback (chemistry)",
          "note": "Sphagnum lowers pH to suppress decomposer enzyme activity, providing chemical stabilization"
        },
        {
          "field_a_term": "peat tipping point / irreversible loss (ecology)",
          "field_b_term": "autocatalytic ignition / explosive reaction threshold (chemistry)",
          "note": "Both represent the critical point where positive feedback overwhelms negative controls"
        },
        {
          "field_a_term": "methane emission rate from bogs (ecology)",
          "field_b_term": "product yield of anaerobic autocatalytic pathway (chemistry)",
          "note": "CH₄ is the end product of anaerobic peat decomposition; its rate scales with autocatalytic feedback"
        }
      ],
      "references": [
        {
          "doi": "10.1111/j.1365-2486.2007.01339.x",
          "note": "Turetsky et al. (2002) - sphagnum-dominated peatlands and climate feedbacks"
        },
        {
          "doi": "10.1038/nature09864",
          "note": "Dorrepaal et al. (2009) - carbon respiration from subsurface peat accelerated by climate warming"
        },
        {
          "doi": "10.1038/nclimate1672",
          "note": "Frolking et al. (2011) - peatland carbon stocks and fluxes (global review)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-chemistry/b-peat-bog-autocatalytic-decomposition.yaml"
    },
    {
      "id": "b-redfield-ratio-ocean-stoichiometry",
      "title": "The Redfield ratio C:N:P = 106:16:1 reflects the average elemental stoichiometry of marine phytoplankton and constrains global ocean nutrient cycling through chemical mass balance",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Deep ocean nutrient concentrations maintain C:N:P ~ 106:16:1 (Redfield ratio) because phytoplankton growth stoichiometry and bacterial remineralization are coupled through the same biochemical machinery, creating a geochemical constraint that links ocean chemistry to biological productivity across o",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Marine chemists measure nutrient ratios while ecologists study phytoplankton physiology; the mechanistic connection between cellular biochemistry and ocean-scale chemical stoichiometry is underemphasized in both oceanography and chemistry curricula.",
      "translation_table": [
        {
          "field_a_term": "phytoplankton C:N:P stoichiometry",
          "field_b_term": "phospholipid, RNA, and protein fractional composition of cells",
          "note": "N/P ratio reflects ribosome-to-protein balance; phosphorus-rich ribosomes drive low N/P in fast-growing cells"
        },
        {
          "field_a_term": "deep water nitrate-to-phosphate ratio",
          "field_b_term": "Redfield ratio 16:1 predicted by mass balance of remineralization",
          "note": "Bacterial decomposition regenerates N and P in ratio equal to phytoplankton uptake ratio, closing the cycle"
        },
        {
          "field_a_term": "nitrogen fixation and denitrification balance",
          "field_b_term": "deviation from Redfield N:P in oxygen minimum zones",
          "note": "Denitrification removes N without P; N fixation adds N; their balance determines ocean N inventory"
        },
        {
          "field_a_term": "stoichiometric flexibility (plasticity)",
          "field_b_term": "Droop model: growth rate limited by most deficient nutrient",
          "note": "C:P ratios vary 10-fold across phytoplankton under P limitation; N:P varies 5-fold under N limitation"
        }
      ],
      "references": [
        {
          "doi": "10.4319/lo.1958.3.1.0054",
          "note": "Redfield (1958) Am Sci - original paper establishing the 106:16:1 ratio and its biological basis"
        },
        {
          "doi": "10.1126/science.1128253",
          "note": "Falkowski et al. (2000) Science - why is the N:P ratio of ocean phytoplankton 16:1?"
        },
        {
          "doi": "10.1073/pnas.0805876105",
          "note": "Klausmeier et al. (2004) - optimal N:P stoichiometry of phytoplankton from evolutionary optimization"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-chemistry/b-redfield-ratio-ocean-stoichiometry.yaml"
    },
    {
      "id": "b-soil-microbiome-carbon-cycling",
      "title": "Soil microbial carbon use efficiency (CUE = 0.3–0.6) and the MEMS framework (high-CUE microbes → necromass → organo-mineral stabilisation) determine whether soil's 2,500 Gt C reservoir accumulates or mineralises, with +3-4°C warming predicted to release ~55 Gt C by 2100 via microbial priming.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Soil holds ~2,500 Gt C — more than three times the combined carbon in the atmosphere (~870 Gt C) and all living biomass (~600 Gt C). The fate of this carbon depends critically on soil microbial community composition and metabolism, connecting ecological community ecology to biogeochemistry and globa",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-mems-high-cue-fungi-mineral-soc-stabilization-warming"
      ],
      "communication_gap": "Soil microbial ecology (ecology journals: ISME J, Soil Biol Biochem), biogeochemistry (Global Biogeochem Cycles, Nat Clim Chang), and Earth system modelling (J Clim, J Adv Model Earth Syst) have different temporal scales (years vs. decades vs. centuries), different spatial scales (pedon vs. landscape vs. global), and different data types (16S rRNA amplicon vs. isotope ratio vs. atmospheric CO₂). The MEMS framework (2013) took nearly a decade to influence global carbon model development. Climate scientists rarely attend soil ecology meetings and vice versa.\n",
      "translation_table": [
        {
          "field_a_term": "carbon use efficiency CUE = ΔC_biomass / ΔC_total",
          "field_b_term": "microbial thermodynamic growth yield (biochemistry: ATP per C-mol)"
        },
        {
          "field_a_term": "priming effect (labile C input → change in recalcitrant C decomposition)",
          "field_b_term": "co-metabolic enzyme induction (microbiology: substrate switching)"
        },
        {
          "field_a_term": "organo-mineral association (Fe/Al-oxide sorption of necromass)",
          "field_b_term": "surface chemistry adsorption isotherm (Langmuir/Freundlich)"
        },
        {
          "field_a_term": "Q₁₀ temperature sensitivity of soil respiration",
          "field_b_term": "Arrhenius activation energy E_a of microbial enzyme complexes"
        },
        {
          "field_a_term": "SOC residence time (radiocarbon ¹⁴C mean age)",
          "field_b_term": "half-life of mineral-protected organic carbon pool"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1097396",
          "note": "Lal (2004) Science 304:1623 — soil carbon sequestration impacts on global climate change"
        },
        {
          "doi": "10.1016/j.soilbio.2011.11.024",
          "note": "Schimel & Schaeffer (2012) Soil Biol Biochem 47:205 — microbial control over C and N cycling"
        },
        {
          "doi": "10.1038/nclimate2361",
          "note": "Wieder et al. (2014) Nat Clim Chang 4:739 — global soil carbon projections with microbial CUE"
        },
        {
          "doi": "10.1038/ncomms13630",
          "note": "Kallenbach et al. (2016) Nat Commun 7:13630 — direct evidence for microbial-derived SOM formation"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-chemistry/b-soil-microbiome-carbon-cycling.yaml"
    },
    {
      "id": "b-stoichiometry-liebig-minimum",
      "title": "Ecological stoichiometry treats organisms as chemical reactors with fixed elemental ratios (the Redfield ratio in marine phytoplankton), and Liebig's law of the minimum — growth is limited by the scarcest nutrient relative to stoichiometric demand — is the biological application of chemical equilibrium constraints.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Organisms maintain remarkably fixed elemental compositions despite variable environmental nutrient ratios. Marine phytoplankton converge on the Redfield ratio C:N:P ≈ 106:16:1 (by atoms), first documented by Alfred Redfield (1958) and later explained as an optimum under evolutionary selection (Klaus",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-liebig-minimum-generalizes-le-chatelier"
      ],
      "communication_gap": "Von Liebig (1840) and Redfield (1958) wrote before modern ecological stoichiometry as a field existed, so their chemical insights were not formalised in the language of chemical thermodynamics. Sterner and Elser (2002) built the field but their textbook is read by ecologists, not chemists. The Le Chatelier connection is conceptually immediate but has not, to our knowledge, been formally demonstrated in any cross-disciplinary publication.\n",
      "translation_table": [
        {
          "field_a_term": "limiting nutrient (element in shortest supply relative to demand)",
          "field_b_term": "limiting reagent in a chemical reaction",
          "note": "the reaction (growth) stops when the limiting reagent (nutrient) is exhausted"
        },
        {
          "field_a_term": "Redfield ratio C:N:P = 106:16:1",
          "field_b_term": "stoichiometric coefficients of the overall biochemical reaction",
          "note": "deviations from Redfield indicate resource imbalance or evolutionary adaptation"
        },
        {
          "field_a_term": "internal cell quota Q (element per cell)",
          "field_b_term": "intracellular concentration of a chemical reactant"
        },
        {
          "field_a_term": "Liebig's law of the minimum",
          "field_b_term": "Le Chatelier's principle (system responds to remove stress on limiting reactant)"
        },
        {
          "field_a_term": "stoichiometric mismatch (consumer vs. food elemental ratio)",
          "field_b_term": "off-stoichiometry reaction — requires excess of non-limiting reactants"
        },
        {
          "field_a_term": "nutrient recycling efficiency",
          "field_b_term": "reaction yield / atom economy in green chemistry"
        }
      ],
      "references": [
        {
          "note": "Liebig (1840) — Chemistry in its Application to Agriculture and Physiology; Taylor and Walton, London"
        },
        {
          "note": "Redfield (1958) — The biological control of chemical factors in the environment; Am Sci 46:205"
        },
        {
          "note": "Sterner & Elser (2002) — Ecological Stoichiometry; Princeton University Press"
        },
        {
          "doi": "10.1111/j.1461-0248.2007.01109.x",
          "note": "Elser et al. (2007) — Global analysis of nitrogen and phosphorus limitation; Ecol Lett 10:1135"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-chemistry/b-stoichiometry-liebig-minimum.yaml"
    },
    {
      "id": "b-vicsek-flocking-x-consensus-raft-leader-stability",
      "title": "Vicsek-type flocking models exhibit noise-driven order–disorder transitions where local alignment rules produce macroscopic directed motion — Raft-style distributed consensus maintains replicated logs under message delays and failures — both fields analyze stability of collective agreement variables (order parameter magnitude vs committed log index) though microscopic mechanisms (heading alignment vs RPC votes) differ.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Increasing noise η in Vicsek models destroys orientational order beyond critical η_c analogous (qualitatively) to consensus latency rising until leader election thrashes — topological versus metric neighborhoods in flocking mirror partially connected overlay graphs in distributed systems — bridge st",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-critical-noise-sweep-scaling-parallels-election-timeout-sweep-phenomenologically"
      ],
      "communication_gap": "Active-matter theorists cite Vicsek & Chaté while systems engineers cite Ongaro & Ousterhout — cross-citations remain rare despite shared noisy coordination mathematics literature (McKean–Vlasov, stochastic stability).\n",
      "translation_table": [
        {
          "field_a_term": "Vicsek alignment noise amplitude η",
          "field_b_term": "Message loss / jitter rates perturbing Raft follower votes",
          "note": "Both raise disorder probability in coordination dynamics toy models."
        },
        {
          "field_a_term": "Average flock polarization order parameter |⟨exp(iθ)⟩|",
          "field_b_term": "Fraction of nodes sharing matching committed log suffix (high-level)",
          "note": "Scalar summaries of agreement levels — not equivalent observables."
        },
        {
          "field_a_term": "Topological interaction graphs (metric-free Vicsek variants)",
          "field_b_term": "Sparse WAN topologies with quorum connectivity requirements",
          "note": "Graph robustness stories resonate across disciplines."
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.75.1226",
          "note": "Vicsek et al. (1995) — flocking transition in self-propelled particle models"
        },
        {
          "doi": "10.1145/3195648",
          "note": "Ongaro & Ousterhout (2014) — Raft consensus algorithm for replicated logs"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-computer-science/b-vicsek-flocking-x-consensus-raft-leader-stability.yaml"
    },
    {
      "id": "b-control-lyapunov-ecological-harvest-management",
      "title": "Control-Lyapunov framing of ecological harvest policy links biomass resilience objectives to explicit stabilizing feedback constraints under environmental shocks.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Biomass dynamics with harvesting can be treated as controlled nonlinear systems where safe operating regions are encoded by Lyapunov-like functions over population state. This bridge converts ecological resilience targets (avoid collapse, maintain recovery margin) into verifiable feedback policy con",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-clf-constrained-harvest-stabilizes-biomass-under-shocks"
      ],
      "communication_gap": "Ecological management plans often describe resilience qualitatively, while control formulations demand explicit stability functions; a shared operational vocabulary is still uncommon.\n",
      "translation_table": [
        {
          "field_a_term": "resilience basin and recovery rate",
          "field_b_term": "Lyapunov decrease conditions and region of attraction",
          "note": "Ecological \"distance to collapse\" maps to a stability certificate margin."
        },
        {
          "field_a_term": "harvest quota and effort limits",
          "field_b_term": "constrained input bounds and saturation nonlinearities",
          "note": "Feasible policy sets are naturally represented as control constraints."
        },
        {
          "field_a_term": "environmental stochastic forcing",
          "field_b_term": "disturbance-robust stability margin",
          "note": "Robust control tools quantify tolerated shock envelopes."
        }
      ],
      "references": [
        {
          "doi": "10.1038/261459a0",
          "note": "May (1976), simple mathematical models with very complicated dynamics."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/ecology-control-engineering/b-control-lyapunov-ecological-harvest-management.yaml"
    },
    {
      "id": "b-bet-hedging-x-portfolio-diversification",
      "title": "Evolutionary bet hedging spreads reproductive risk across correlated environmental states — analogous to diversification lowering variance of portfolio returns when asset shocks are imperfectly correlated — making correlation structure (between-year environments vs between-lineage phenotypes) the shared mathematical object linking ecology and finance.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Bet hedging trades arithmetic mean fitness for geometric mean fitness across stochastic environments by maintaining phenotypic variance or stochastic switching (Lottery vs conservative strategies). Portfolio diversification trades mean return for variance reduction by combining imperfectly correlate",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-bet-hedging-x-portfolio-diversification"
      ],
      "communication_gap": "Evolutionary ecology emphasizes bet hedging verbal definitions while mathematical finance emphasizes covariance estimation and tail risk; cross citations remain sparse outside theoretical ecology journals importing Kelly portfolio ideas.\n",
      "translation_table": [
        {
          "field_a_term": "Phenotypic variance / stochastic switching rate in bet hedging",
          "field_b_term": "Portfolio weights across risky assets",
          "note": "Both tune allocation across substates to reshape variance of growth rates."
        },
        {
          "field_a_term": "Environmental covariance across time or patches",
          "field_b_term": "Asset return covariance matrix Σ",
          "note": "Color noise vs independent shocks maps to correlated vs idiosyncratic asset risks."
        },
        {
          "field_a_term": "Geometric mean fitness / long-run growth rate",
          "field_b_term": "Expected log wealth growth under reinvestment (Kelly-like objectives)",
          "note": "Same multiplicative growth mathematics motivates optimizing logs rather than arithmetic means."
        }
      ],
      "references": [
        {
          "doi": "10.1086/284072",
          "note": "Cohen (1966) Am. Nat. — classic diversification-of-risk framing in ecology"
        },
        {
          "doi": "10.2307/1907967",
          "note": "Kelly (1956) — growth-optimal betting links multiplicative growth and portfolio logs"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-economics/b-bet-hedging-x-portfolio-diversification.yaml"
    },
    {
      "id": "b-commons-game-theory-ostrom",
      "title": "Hardin's tragedy of the commons is a prisoner's dilemma, and Ostrom's polycentric governance of common-pool resources is formally equivalent to the folk theorem of repeated game theory: communities that interact repeatedly sustain cooperation via conditional punishment strategies, provided the discount factor δ exceeds a critical cooperation threshold.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Hardin (1968) argued that rational individuals sharing a common resource (fishery, pasture, aquifer) will inevitably overexploit it — each user captures the full benefit of increased extraction but shares the cost of depletion. This is a prisoner's dilemma (PD) with the payoff structure:\n  Cooperate",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ostrom-design-principles-folk-theorem-mapping"
      ],
      "communication_gap": "Hardin (1968) wrote for biologists and ecologists without awareness of the formal game theory literature that already described the PD structure he was articulating. Ostrom (1990) developed her framework in political science and institutional economics, citing game theory informally but not deriving the folk theorem connection explicitly. Fudenberg & Maskin (1986) proved the folk theorem in the economics theory literature without reference to CPR governance. Axelrod (1984) connected TFT to cooperation in biology and political science but not explicitly to Ostrom's institutional framework. The complete formal equivalence — Ostrom's design principles = folk theorem conditions — is recognised in mechanism design theory but has not been translated back into ecology and conservation policy.\n",
      "translation_table": [
        {
          "field_a_term": "Prisoner's dilemma one-shot game",
          "field_b_term": "Tragedy of the commons (Hardin 1968)",
          "note": "One-shot interaction: defection (overextraction) is dominant strategy; depletion is inevitable"
        },
        {
          "field_a_term": "Discount factor δ (present value of future payoffs)",
          "field_b_term": "Frequency of interaction / shadow of the future in CPR community",
          "note": "Long-lived, frequently interacting communities have high δ; transient users have low δ"
        },
        {
          "field_a_term": "Critical cooperation threshold δ_c = (T-R)/(T-P)",
          "field_b_term": "Minimum community cohesion for sustainable commons governance",
          "note": "When interactions are frequent enough (δ > δ_c), cooperation is a Nash equilibrium"
        },
        {
          "field_a_term": "Grim-trigger strategy (defect forever after one deviation)",
          "field_b_term": "Exclusion from the commons for rule violations",
          "note": "Ostrom's graduated sanctions escalate toward permanent exclusion — the grim trigger"
        },
        {
          "field_a_term": "Tit-for-Tat (TFT) in Axelrod tournaments",
          "field_b_term": "Reciprocal rule enforcement in CPR institutions",
          "note": "TFT is the simplest folk-theorem cooperator; reciprocal monitoring enforces TFT socially"
        },
        {
          "field_a_term": "Folk theorem (any feasible payoff for δ > δ_c)",
          "field_b_term": "Ostrom's design principles (institutions that sustain cooperation)",
          "note": "Each design principle is an institutional mechanism that ensures δ > δ_c holds"
        },
        {
          "field_a_term": "Evolutionarily stable strategy (ESS) in replicator dynamics",
          "field_b_term": "Stable CPR governance institution resistant to defector invasion",
          "note": "Ostrom's long-lived institutions are ESS cooperative equilibria in evolutionary game theory"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.162.3859.1243",
          "note": "Hardin (1968) Science 162:1243 — The Tragedy of the Commons; PD structure of resource depletion"
        },
        {
          "note": "Ostrom (1990) Governing the Commons (Cambridge UP) — polycentric governance of CPR; Nobel 2009",
          "url": "https://doi.org/10.1017/CBO9780511807763"
        },
        {
          "note": "Axelrod (1984) The Evolution of Cooperation (Basic Books) — TFT wins iterated PD; biological and social applications",
          "url": "https://www.basicbooks.com/titles/robert-axelrod/the-evolution-of-cooperation/9780465005642/"
        },
        {
          "doi": "10.2307/1911307",
          "note": "Fudenberg & Maskin (1986) Econometrica 54:533 — the folk theorem in repeated games with discounting or incomplete information"
        },
        {
          "doi": "10.1126/science.1200177",
          "note": "Ostrom (2009) Science 325:419 — a general framework for analyzing sustainability of social-ecological systems (Nobel lecture)"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/ecology-economics/b-commons-game-theory-ostrom.yaml"
    },
    {
      "id": "b-biomimicry-sustainable-design",
      "title": "Biomimicry applies 3.8 billion years of evolutionary R&D to engineering design: lotus superhydrophobicity, kingfisher-beak aerodynamics, whale-tubercle lift enhancement, spider-silk mechanics, and termite-mound passive ventilation each solve engineering problems through biological principles refined by natural selection.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Biomimicry (Benyus 1997): natural selection has acted as a design engineer for 3.8 billion years, solving mechanical, thermal, optical, and chemical challenges under constraints of material efficiency, scalability, and self-repair that human engineering rarely achieves.\nLotus effect (superhydrophobi",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-biomimicry-design-convergence-performance-ceiling"
      ],
      "communication_gap": "Biomimicry is promoted in popular science (Benyus 1997, TED talks) but the engineering translation requires deep expertise in both the biological system (morphology, mechanics, physics of the biological solution) and the engineering application. Most engineers lack the biology background to identify relevant biological examples; most biologists lack the engineering background to extract design principles. Dedicated biomimicry research centers (e.g., Biomimicry Institute, AskNature database) have partially bridged this, but systematic translation remains rare.\n",
      "translation_table": [
        {
          "field_a_term": "lotus leaf dual-scale roughness (papillae + wax nanocrystals)",
          "field_b_term": "Cassie-Baxter superhydrophobic surface — contact angle >150°",
          "note": "Hierarchical structure is essential; single-scale roughness achieves partial effect only"
        },
        {
          "field_a_term": "kingfisher beak impedance-matching taper",
          "field_b_term": "Shinkansen nose profile — minimizes tunnel boom, reduces drag",
          "note": "Bio-inspired aerodynamic profile derived from studying diving behavior"
        },
        {
          "field_a_term": "whale tubercle passive vortex generators",
          "field_b_term": "wind turbine blade stall delay — lift angle +5°, efficiency +10%",
          "note": "Tubercles disrupt laminar boundary layer; delay stall without active control"
        },
        {
          "field_a_term": "spider silk beta-sheet nanocrystal network",
          "field_b_term": "high-toughness fiber — tensile strength 1.3 GPa, strain 40%",
          "note": "Outperforms Kevlar in toughness (area under stress-strain curve) due to large failure strain"
        },
        {
          "field_a_term": "termite mound thermal buoyancy ventilation",
          "field_b_term": "passive building ventilation — 85% energy savings vs. conventional A/C",
          "note": "Eastgate Centre demonstrates scalability; requires no external energy input"
        }
      ],
      "references": [
        {
          "note": "Benyus (1997) Biomimicry: Innovation Inspired by Nature. William Morrow"
        },
        {
          "doi": "10.1007/PL00008818",
          "note": "Barthlott & Neinhuis (1997) Planta 202:1 — lotus effect superhydrophobicity"
        },
        {
          "doi": "10.1002/jmor.1052250106",
          "note": "Fish & Battle (1995) J Morphol 225:51 — humpback whale tubercle geometry"
        },
        {
          "doi": "10.1242/jeb.202.23.3295",
          "note": "Gosline et al. (1999) J Exp Biol 202:3295 — mechanical properties of spider silk"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-engineering/b-biomimicry-sustainable-design.yaml"
    },
    {
      "id": "b-precision-agriculture-remote-sensing",
      "title": "Precision Agriculture and Remote Sensing — NDVI satellite imagery, LiDAR canopy mapping, variable rate application, and machine learning yield forecasting for feeding 9 billion people",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Precision agriculture applies site-specific crop management at sub-field resolution using spatial data from multiple sensor platforms. Multispectral satellite and drone imagery provides the most widespread data source: NDVI (Normalized Difference Vegetation Index) = (NIR−Red)/(NIR+Red), where NIR is",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Remote sensing scientists develop satellite analysis methods primarily for global change monitoring; precision agriculture practitioners are farmers and agronomists who need actionable field-scale decision support. The gap between research-grade remote sensing products (global 10 m NDVI) and farm-scale prescription maps (1–3 m resolution) involves substantial data processing, agronomic interpretation, and economic analysis that crosses disciplinary boundaries rarely bridged. Agricultural machine learning is a rapidly growing field but requires domain expertise (agronomy, soil science) that most ML practitioners lack.\n",
      "translation_table": [
        {
          "field_a_term": "NDVI = (NIR - Red)/(NIR + Red)",
          "field_b_term": "satellite proxy for crop chlorophyll content and photosynthetic vigour",
          "note": "NDVI < 0.3 indicates stressed or senescent vegetation; repeated time series tracks crop development stages"
        },
        {
          "field_a_term": "LiDAR point cloud (3D vegetation structure)",
          "field_b_term": "canopy height model and terrain digital elevation model for drainage analysis",
          "note": "LiDAR separates ground returns from vegetation; DTM (bare earth) - DSM (surface) = canopy height model"
        },
        {
          "field_a_term": "variable rate application (VRA) prescription maps",
          "field_b_term": "spatially explicit fertiliser and pesticide application reducing input waste",
          "note": "Agronomic benefit and environmental benefit are aligned: over-application wastes money and pollutes waterways"
        },
        {
          "field_a_term": "EM38 soil electrical conductivity (ECa)",
          "field_b_term": "proxy for soil clay content, moisture, and organic matter",
          "note": "Clay particles are electrically conductive; EM38 maps ECa variation correlating with sand/clay ratio at survey speed"
        },
        {
          "field_a_term": "CNN on satellite time series for crop classification",
          "field_b_term": "automated field-level crop type mapping for supply chain monitoring",
          "note": "LSTM or temporal CNN captures phenological patterns (NDVI curves vary by crop type); >95% accuracy in calibrated regions"
        },
        {
          "field_a_term": "yield gap analysis (actual vs. potential yield)",
          "field_b_term": "identification of management and resource constraints limiting productivity",
          "note": "Closing 50% of the global yield gap would provide sufficient calories for 9.7B without expanding cropland"
        }
      ],
      "references": [
        {
          "doi": "10.1016/j.compag.2013.07.004",
          "note": "Mulla (2013) Comput Electron Agric 93:2 — precision agriculture review"
        },
        {
          "doi": "10.1016/0034-4257(79)90013-0",
          "note": "Tucker (1979) Remote Sens Environ 8:127 — NDVI for vegetation monitoring"
        },
        {
          "doi": "10.1126/science.1183700",
          "note": "Gebbers & Adamchuk (2010) Science 327:828 — precision agriculture and food security"
        },
        {
          "doi": "10.1038/s41477-020-0768-8",
          "note": "Lobell et al. (2020) Nat Plants 6:29 — machine learning for crop yield forecasting"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-engineering/b-precision-agriculture-remote-sensing.yaml"
    },
    {
      "id": "b-climate-tick-range-lyme",
      "title": "Climate warming, Ixodes tick range expansion, and Lyme disease incidence — an ecology–epidemiology bridge linking tick population dynamics and deer management to human disease burden.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Lyme disease is simultaneously an ecological and epidemiological problem, but the two communities use different models, metrics, and interventions. Ecology side: Ixodes scapularis (black-legged tick) requires a 3-host life cycle across 2 years, ≥50% relative humidity for survival and development, wh",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ohio-lyme-deer-management-intervention"
      ],
      "communication_gap": "Tick ecologists and field biologists publish primarily in Parasitology, Journal of Medical Entomology, and Ecological Applications. Epidemiologists studying Lyme disease publish in Emerging Infectious Diseases, MMWR, and American Journal of Epidemiology. The mathematical epidemiologists who develop R₀ models rarely have field ecology training. State health departments (who collect human incidence data) do not coordinate surveillance efforts with state wildlife agencies (who manage deer populations), even though deer density is the primary lever for reducing tick burden. Climate scientists whose models generate habitat projections are largely disconnected from both communities. This fragmentation is a direct barrier to evidence-based Lyme prevention policy.\n",
      "translation_table": [
        {
          "field_a_term": "deer density (ecology)",
          "field_b_term": "tick abundance per unit area (epidemiology-relevant exposure metric)",
          "note": "Deer density is the primary driver of adult tick reproductive success; epidemiology needs per-area tick nymph density as the proximal exposure metric"
        },
        {
          "field_a_term": "Peromyscus leucopus reservoir competence (ecology)",
          "field_b_term": "Borrelia force of infection from reservoir to tick (epidemiology)",
          "note": "Reservoir competence (% nymphs infected after feeding on reservoir host) translates ecological data to epidemiological transmission parameters"
        },
        {
          "field_a_term": "habitat suitability index for Ixodes scapularis (ecology/climate)",
          "field_b_term": "geographic risk surface for human Lyme exposure (epidemiology)",
          "note": "Ecological niche models can be converted to human exposure risk maps if calibrated with surveillance incidence data"
        },
        {
          "field_a_term": "tick questing behavior / host-finding rate (ecology)",
          "field_b_term": "contact rate between infectious vector and susceptible human (R₀ component)",
          "note": "The core bridge between field ecology (tick behavior studies) and mathematical epidemiology (R₀ formulation)"
        },
        {
          "field_a_term": "deer exclusion fence experiment (ecology)",
          "field_b_term": "Lyme incidence reduction in treated area (epidemiology outcome)",
          "note": "Daniels et al. (1993) showed deer exclusion reduces tick density; the epidemiological translation to human cases has not been rigorously quantified"
        }
      ],
      "references": [
        {
          "doi": "10.1016/j.ijpara.2008.01.014",
          "note": "Ogden et al. (2008) Role of migratory birds in introduction and range expansion of Ixodes scapularis ticks and of Borrelia burgdorferi and Anaplasma phagocytophilum in Canada. Int J Parasitol 38:887–897."
        },
        {
          "doi": "10.3201/eid2703.201994",
          "note": "Kugeler et al. (2021) Estimated annual infections of Lyme disease, United States, 2010–2018. Emerg Infect Dis 27:616."
        },
        {
          "doi": "10.1111/j.1461-0248.2008.01159.x",
          "note": "Brunner et al. (2008) Hosts as ecological traps for the vector of Lyme disease. Ecol Lett 11:820–828."
        },
        {
          "doi": "10.1038/s41598-019-43105-4",
          "note": "Bouchard et al. (2019) Harvested white-tailed deer as sentinel hosts for early establishing Ixodes scapularis populations and risk of Lyme disease in Quebec, Canada. Sci Rep 9:6694."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-epidemiology/b-climate-tick-range-lyme.yaml"
    },
    {
      "id": "b-metapopulation-sir-patch-occupancy",
      "title": "Levins metapopulation patch-occupancy dynamics are formally equivalent to multi-patch SIR epidemic models: colonization rate maps to infection transmission, local extinction maps to recovery, and the rescue effect in ecology is mathematically identical to importation of infection across population patches\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Levins metapopulation equation dp/dt = c·p·(1-p) - e·p (p = fraction of occupied patches, c = colonization rate, e = extinction rate) is structurally identical to the mean-field SIR patch-infection equation dI/dt = β·I·(1-I)/N - γ·I; the metapopulation equilibrium p* = 1 - e/c maps directly to t",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Ecologists and epidemiologists developed metapopulation and SIR models in parallel through the 1970s–1990s without cross-citation; Levins (1969) predates the formal SIR spatial extension literature, and ecologists rarely cite the epidemiological literature on patch models. The shared mathematical structure is acknowledged in theoretical ecology and mathematical biology reviews but is not part of standard training in either field.\n",
      "translation_table": [
        {
          "field_a_term": "fraction of occupied patches p (ecology)",
          "field_b_term": "prevalence of infected sub-populations I/N (epidemiology)",
          "note": "Both track the fraction of a set of discrete units in the 'active' state; the governing ODEs are algebraically identical at mean-field level"
        },
        {
          "field_a_term": "colonization rate c (ecology)",
          "field_b_term": "transmission rate β (epidemiology)",
          "note": "c describes the rate at which empty patches become occupied via propagule rain; β describes the rate at which susceptible patches become infected via contact with infectious patches"
        },
        {
          "field_a_term": "local extinction rate e (ecology)",
          "field_b_term": "recovery/clearance rate γ (epidemiology)",
          "note": "e is the rate at which occupied patches go locally extinct; γ is the rate at which infected patches recover and become susceptible again"
        },
        {
          "field_a_term": "rescue effect — immigration reducing local extinction probability (ecology)",
          "field_b_term": "importation of infection reducing local extinction of outbreak (epidemiology)",
          "note": "In both systems, connectivity between patches prevents local stochastic extinction; the rescue effect in ecology and epidemic importation are the same phenomenon"
        },
        {
          "field_a_term": "metapopulation viability threshold c/e > 1 (ecology)",
          "field_b_term": "epidemic persistence threshold R₀ = β/γ > 1 (epidemiology)",
          "note": "Both represent the condition for a positive equilibrium state; below threshold the system collapses to extinction/disease-free equilibrium"
        }
      ],
      "references": [
        {
          "doi": "10.1038/227458a0",
          "note": "Levins (1969) — Some demographic and genetic consequences of environmental heterogeneity for biological control; original metapopulation patch-occupancy model"
        },
        {
          "doi": "10.1086/283427",
          "note": "Hanski & Gyllenberg (1993) — Two general metapopulation models and the core-satellite species hypothesis; formal analysis of patch dynamics"
        },
        {
          "doi": "10.1098/rspb.2003.2343",
          "note": "Keeling & Rohani (2002) — Estimating spatial coupling in epidemiological systems; bridges SIR spatial models with patch occupancy"
        },
        {
          "doi": "10.1016/j.tpb.2004.01.001",
          "note": "Hess (1996) — Disease in metapopulation models: implications for conservation; direct mapping of SIR onto Levins framework"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-epidemiology/b-metapopulation-sir-patch-occupancy.yaml"
    },
    {
      "id": "b-animal-coloration-honest-signaling-fisher-runaway",
      "title": "Animal coloration for mate attraction is governed by two competing evolutionary mechanisms — honest signaling (Zahavian handicap) and Fisher runaway selection — which are formalized by different mathematical models connecting evolutionary biology to game theory and physics of symmetry breaking.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The handicap principle (Zahavi 1975, Grafen 1990) models costly coloration as a signaling game: the ESS signal intensity satisfies a separating equilibrium where signal cost equals the benefit of attracting mates, mapping onto Spence's job-market signaling model. Fisher runaway (Lande 1981) is a dif",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-fisher-runaway-positive-feedback-speciation"
      ],
      "communication_gap": "Evolutionary biologists, physicists studying structural color, and economists studying signaling theory all work on animal coloration from different angles without systematic collaboration; the photonics of structural color is well-studied in materials science but rarely connected to the evolutionary dynamics of preference for structural vs. pigment-based signals.\n",
      "translation_table": [
        {
          "field_a_term": "carotenoid-based plumage brightness (evolutionary biology)",
          "field_b_term": "signal cost in separating equilibrium (game theory)",
          "note": "Parasite load reduces carotenoid availability — honest condition-dependent signal"
        },
        {
          "field_a_term": "female mate preference allele frequency (evolutionary biology)",
          "field_b_term": "unstable runaway trajectory in preference-trait genetic space (mathematics)",
          "note": "Lande's genetic covariance matrix drives Fisher runaway as a positive eigenvalue"
        },
        {
          "field_a_term": "structural iridescence / UV reflectance (evolutionary biology)",
          "field_b_term": "photonic crystal band gap (physics)",
          "note": "Nanostructured feather barbules generate structural color via thin-film interference"
        },
        {
          "field_a_term": "honest condition-dependent signal (evolutionary biology)",
          "field_b_term": "incentive-compatible mechanism in signaling game (economics)",
          "note": "Zahavian handicap = Spence signaling equilibrium — same mathematical structure"
        }
      ],
      "references": [
        {
          "doi": "10.1016/S0003-3472(75)80053-8",
          "note": "Zahavi (1975) — mate selection and the handicap principle"
        },
        {
          "doi": "10.1086/285448",
          "note": "Grafen (1990) — biological signals as handicaps — the formal theory"
        },
        {
          "doi": "10.2307/2408043",
          "note": "Lande (1981) — models of speciation by sexual selection"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-evolutionary-biology/b-animal-coloration-honest-signaling-fisher-runaway.yaml"
    },
    {
      "id": "b-invasion-fitness-adaptive-dynamics-ess",
      "title": "Adaptive dynamics uses invasion fitness — the per-capita growth rate of a rare mutant in a resident population — to derive evolutionarily stable strategies (ESS) and evolutionary branching points, bridging ecology and evolutionary biology through a unified mathematical framework.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "In adaptive dynamics, the fitness of a rare mutant x' in a resident population at equilibrium with trait x is sx(x') = r(x', x̂(x)), where x̂(x) is the resident equilibrium. Evolution follows the canonical equation ẋ = (1/2)σ²·∂sx(x')/∂x'|_{x'=x}, climbing the fitness gradient. ESS are singular poin",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-evolutionary-branching-disruptive-selection-speciation"
      ],
      "communication_gap": "Adaptive dynamics is well-developed theoretically (Geritz, Metz, Dieckmann) but remains underutilized in empirical evolutionary biology; many evolutionary biologists are unfamiliar with the canonical equation formalism, and ecologists often treat evolution as a separate process rather than co-evolving with ecological dynamics.\n",
      "translation_table": [
        {
          "field_a_term": "trait evolution by natural selection (evolutionary biology)",
          "field_b_term": "gradient ascent on invasion fitness landscape (mathematics)",
          "note": "Canonical equation of adaptive dynamics is a first-order ODE in trait space"
        },
        {
          "field_a_term": "evolutionarily stable strategy (evolutionary biology)",
          "field_b_term": "Nash equilibrium of the invasion fitness game (mathematics)",
          "note": "ESS = uninvadable strategy = Nash equilibrium of the frequency-dependent game"
        },
        {
          "field_a_term": "evolutionary branching / sympatric speciation (evolutionary biology)",
          "field_b_term": "fitness minimum / bifurcation in trait space (mathematics)",
          "note": "Branching occurs at a CSS where the fitness function has negative curvature"
        },
        {
          "field_a_term": "resident equilibrium density (ecology)",
          "field_b_term": "background state determining the fitness landscape (mathematics)",
          "note": "Ecological dynamics set the resident equilibrium that defines the fitness function"
        }
      ],
      "references": [
        {
          "doi": "10.1006/tpbi.1997.1295",
          "note": "Geritz et al. (1998) — evolutionarily singular strategies and adaptive radiation"
        },
        {
          "doi": "10.1016/S0022-5193(02)93875-4",
          "note": "Dieckmann & Law (1996) — the dynamical theory of coevolution"
        },
        {
          "doi": "10.1086/285812",
          "note": "Metz et al. (1992) — invasion fitness and adaptive dynamics framework"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-evolutionary-biology/b-invasion-fitness-adaptive-dynamics-ess.yaml"
    },
    {
      "id": "b-niche-construction-extended-evolutionary-synthesis",
      "title": "Niche construction — the modification of selective environments by organisms — creates ecological inheritance that complements genetic inheritance, and its dynamics are captured by an extended evolutionary synthesis model in which allele frequency changes couple bidirectionally to niche variables through a modified Price equation that accounts for both genetic selection and environmental feedback",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Niche construction theory formalizes Lamarckian-style feedbacks within a rigorous Darwinian framework: the modified Price equation for niche-constructing populations includes an ecological inheritance term Delta_E alongside the standard genetic transmission term Delta_G; the coupled system can produ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-niche-construction-accelerated-local-adaptation"
      ],
      "communication_gap": "Ecologists studying habitat modification and evolutionary biologists studying adaptation rarely share mathematical frameworks; the EES is acknowledged in theory but calibrated empirical tests using the full Price equation niche construction formalism are rare because the ecological inheritance term requires multigenerational environment tracking.",
      "translation_table": [
        {
          "field_a_term": "niche construction (organism modifies selective environment) (ecology)",
          "field_b_term": "frequency-dependent selection and environmental feedback in evolutionary dynamics (evolutionary biology)",
          "note": "Niche construction creates an extra selective feedback loop absent from standard population genetics"
        },
        {
          "field_a_term": "ecological inheritance (modified environment transmitted between generations) (ecology)",
          "field_b_term": "non-genetic inheritance alongside genetic transmission in Price equation (evolutionary biology)",
          "note": "Ecological inheritance enters the Price equation as an additional transmission term"
        },
        {
          "field_a_term": "niche construction rate K (ecology)",
          "field_b_term": "selection differential S for ecological variables in EES model (evolutionary biology)",
          "note": "Rate of environment modification determines the strength of the eco-evolutionary feedback"
        },
        {
          "field_a_term": "counteractive vs. reinforcing niche construction (ecology)",
          "field_b_term": "negative vs. positive frequency-dependent selection (evolutionary biology)",
          "note": "Counteractive NC stabilizes the system (NFD); reinforcing NC produces runaway evolution (PFD)"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rstb.2003.1329",
          "note": "Odling-Smee et al. (2003) - niche construction, ecological inheritance, and cycles of contingency (foundational theory)"
        },
        {
          "doi": "10.1093/evq/evu017",
          "note": "Laland et al. (2014) - does evolutionary theory need a rethink? (EES arguments including NC)"
        },
        {
          "doi": "10.1098/rsbl.2015.1008",
          "note": "Loring et al. (2015) - empirical tests of niche construction theory in field populations"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-evolutionary-biology/b-niche-construction-extended-evolutionary-synthesis.yaml"
    },
    {
      "id": "b-phenotypic-plasticity-reaction-norms",
      "title": "Phenotypic plasticity — the capacity of a single genotype to produce different phenotypes in different environments — is formalized by the reaction norm (phenotype-as-function-of-environment), whose shape, slope, and curvature are heritable quantitative traits subject to natural selection\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A reaction norm W: E → P maps each environmental value e ∈ E to the expressed phenotype P(e) for a given genotype; the slope dP/de measures plasticity sensitivity, the curvature d²P/de² indicates canalization (resistance to environmental variation), and inter-individual variance in reaction norm sha",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-reaction-norm-slope-predicts-climate-tracking"
      ],
      "communication_gap": "Evolutionary biologists measure reaction norms through common garden and transplant experiments while mathematical biologists study function-valued traits and infinite-dimensional quantitative genetics; the application of functional data analysis and calculus of variations to reaction norm evolution is not standard in empirical biology.\n",
      "translation_table": [
        {
          "field_a_term": "reaction norm slope across environments (evolutionary biology)",
          "field_b_term": "derivative dP/de of the phenotype-environment function (mathematics)",
          "note": "Steep slopes indicate high plasticity; slope is heritable and can evolve by selection"
        },
        {
          "field_a_term": "genotype-by-environment interaction (evolutionary biology)",
          "field_b_term": "variation in reaction norm shape across genotypes (function space statistics) (mathematics)",
          "note": "G×E interaction is non-zero when different genotypes have different-shaped reaction norm functions"
        },
        {
          "field_a_term": "canalization (evolutionary biology)",
          "field_b_term": "low variance in phenotype across environmental range — flat reaction norm (mathematics)",
          "note": "Canalized traits have dP/de ≈ 0; canalization evolves when stable environment makes plasticity costly"
        },
        {
          "field_a_term": "genetic assimilation (evolutionary biology)",
          "field_b_term": "evolution of reaction norm intercept toward formerly plastic response — loss of slope (mathematics)",
          "note": "After environmental change, selection reduces slope and increases intercept; quantified by reaction norm morphology change"
        }
      ],
      "references": [
        {
          "doi": "10.1086/284827",
          "note": "Via & Lande (1985) - genotype-environment interaction and the evolution of phenotypic plasticity"
        },
        {
          "doi": "10.1111/evo.12175",
          "note": "Stinchcombe et al. (2012) - function-valued traits working group - genetics and evolution of function-valued traits"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-evolutionary-biology/b-phenotypic-plasticity-reaction-norms.yaml"
    },
    {
      "id": "b-red-queen-coevolutionary-cycles",
      "title": "Antagonistic host-parasite coevolution drives persistent allele frequency cycling (Red Queen dynamics) whose period and amplitude are predicted by Lotka-Volterra-type coevolutionary equations analogous to ecological predator-prey cycles\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Red Queen hypothesis — that host populations must continuously evolve resistance to coevolving parasites — generates oscillatory allele frequency dynamics formally equivalent to ecological predator-prey cycles: parasite fitness increases when host is common (mimicking prey abundance driving pred",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-red-queen-cycling-sustained-by-spatial-structure"
      ],
      "communication_gap": "Evolutionary biologists study coevolution through field surveys and experimental evolution while applied mathematicians analyse predator-prey equations; the formal mathematical equivalence between Red Queen allele cycles and Lotka-Volterra population cycles is recognized theoretically but rarely exploited for quantitative prediction.\n",
      "translation_table": [
        {
          "field_a_term": "host resistance allele frequency p(t) (evolutionary biology)",
          "field_b_term": "prey population density N_H(t) in predator-prey ODE (mathematics)",
          "note": "Host resistance frequency plays the role of prey density: increases when parasite frequency is low"
        },
        {
          "field_a_term": "parasite virulence allele frequency q(t) (evolutionary biology)",
          "field_b_term": "predator population density N_P(t) in predator-prey ODE (mathematics)",
          "note": "Parasite frequency plays predator role: increases when host is common and susceptible"
        },
        {
          "field_a_term": "selection coefficient s for resistance/virulence (evolutionary biology)",
          "field_b_term": "interaction coefficient α in Lotka-Volterra predator-prey equations (mathematics)",
          "note": "Fitness benefit of resistance ~ fitness benefit of prey for predator; both control cycle period"
        },
        {
          "field_a_term": "negative frequency-dependent selection (evolutionary biology)",
          "field_b_term": "oscillatory stability in Lotka-Volterra predator-prey system (mathematics)",
          "note": "Both systems produce persistent cycles rather than equilibria when interactions are negative-frequency-dependent"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature09207",
          "note": "Decaestecker et al. (2007) - host-parasite Red Queen dynamics archived in pond sediment"
        },
        {
          "doi": "10.1098/rspb.2011.0171",
          "note": "Koskella & Lively (2009) - evidence for Red Queen cycling through temporal tracking of parasite adaptation"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-evolutionary-biology/b-red-queen-coevolutionary-cycles.yaml"
    },
    {
      "id": "b-ess-ecosystem-dynamics",
      "title": "Maynard Smith's evolutionarily stable strategies are Nash equilibria of the ecological game: replicator dynamics on the strategy simplex unifies evolutionary game theory with Lotka-Volterra competition, and rock-paper-scissors cyclic dominance maintains biodiversity.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Maynard Smith & Price (1973) introduced the evolutionarily stable strategy (ESS) concept by applying game theory to biology. The resulting framework unifies evolutionary and ecological dynamics with remarkable precision.\n1. ESS as Nash equilibrium. A strategy I is an ESS if no mutant strategy J\n   c",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-cyclic-dominance-spatial-heterogeneity-biodiversity"
      ],
      "communication_gap": "Game theorists (economics, mathematics) and ecologists publish in separate journals (Games and Economic Behavior vs. Ecology, American Naturalist). Evolutionary game theory was largely developed by biologists (Maynard Smith, Hamilton, Parker) and theoretical ecologists (May, Hofbauer) rather than economists — creating a parallel literature that economists are often unaware of. The replicator dynamics / Lotka-Volterra equivalence is well-known to theoretical ecologists but rarely taught in game theory courses.\n",
      "translation_table": [
        {
          "field_a_term": "Nash equilibrium (game theory)",
          "field_b_term": "evolutionarily stable strategy ESS (evolutionary biology)",
          "note": "ESS is Nash equilibrium with additional stability condition against invasion"
        },
        {
          "field_a_term": "payoff matrix aᵢⱼ (game theory)",
          "field_b_term": "competition coefficients αᵢⱼ / fitness matrix (ecology)",
          "note": "Payoff = fitness effect of interaction; competition matrix = negative payoff"
        },
        {
          "field_a_term": "strategy frequency xᵢ (game theory)",
          "field_b_term": "species relative abundance (ecology)",
          "note": "Population frequency in evolutionary game = species abundance in ecology"
        },
        {
          "field_a_term": "dominant strategy (game theory)",
          "field_b_term": "competitively superior species / competitive exclusion (ecology)",
          "note": "Dominant strategy wins = Gause's law of competitive exclusion"
        },
        {
          "field_a_term": "mixed Nash equilibrium (game theory)",
          "field_b_term": "stable coexistence at interior fixed point (ecology)",
          "note": "Mixed strategy equilibrium = multi-species stable coexistence"
        },
        {
          "field_a_term": "rock-paper-scissors non-transitive game (game theory)",
          "field_b_term": "cyclic dominance among species (ecology)",
          "note": "Non-transitive competition produces cyclic population dynamics"
        }
      ],
      "references": [
        {
          "doi": "10.1038/246015a0",
          "note": "Maynard Smith & Price (1973) Nature 246:15 — ESS and the logic of animal conflict"
        },
        {
          "doi": "10.1137/0128012",
          "note": "May & Leonard (1975) SIAM J Appl Math — nonlinear aspects of competition"
        },
        {
          "doi": "10.1038/nature00823",
          "note": "Kerr et al. (2002) Nature 418:171 — local dispersal promotes rock-paper-scissors biodiversity"
        },
        {
          "url": "https://www.cambridge.org/core/books/evolutionary-games-and-population-dynamics/7DFEBF549A5CEE74EFF8A40CF92B92DC",
          "note": "Hofbauer & Sigmund (1998) Evolutionary Games and Population Dynamics — Cambridge"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-game-theory/b-ess-ecosystem-dynamics.yaml"
    },
    {
      "id": "b-biodiversity-entropy-measures",
      "title": "Shannon entropy applied to species relative abundances gives the Shannon diversity index; Hill numbers unify Shannon (q→1), Simpson (q=2), and species richness (q=0) as the Rényi entropy family applied to ecology; and MaxEnt models derive species abundance distributions from the same thermodynamic analogy that produces the Boltzmann distribution.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Shannon's entropy H = -Σ_i p_i log p_i applied to species i with relative abundance p_i is used directly as a biodiversity index (H' or Shannon diversity), quantifying uncertainty in the species identity of a randomly drawn individual. Hill (1973) unified all common diversity indices as instances of",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-hill-numbers-unify-diversity-indices"
      ],
      "communication_gap": "Shannon's 1948 paper was written for electrical engineers and communication theorists. Hill (1973) published in the ecology journal Ecology; the paper is well-known among ecologists but the Rényi entropy connection is rarely acknowledged. Information theorists working on entropy measures rarely read ecology journals. Harte's MaxEnt ecology textbook (2011) is technically demanding and has been critiqued (Haegeman & Etienne 2010) but has not generated the cross-disciplinary engagement with statistical physicists that it deserves.\n",
      "translation_table": [
        {
          "field_a_term": "Shannon entropy H = -Σ p_i log p_i",
          "field_b_term": "Shannon diversity index H' (nats or bits of species uncertainty)",
          "note": "numerically identical; different interpretations and units"
        },
        {
          "field_a_term": "Rényi entropy R_q = (1/(1-q)) log Σ p_i^q",
          "field_b_term": "Hill number ^qD (effective number of species at diversity order q)",
          "note": "Hill (1973) expressed Rényi entropy as species diversity with natural units"
        },
        {
          "field_a_term": "diversity order q (sensitivity parameter)",
          "field_b_term": "weighting of rare vs. common species in diversity measurement",
          "note": "q=0: all species equal; q=1: weighted by abundance; q=2: dominated by common species"
        },
        {
          "field_a_term": "mutual information I(A;B)",
          "field_b_term": "beta diversity (compositional turnover between sites)",
          "note": "quantifies how much knowledge of site A reduces uncertainty about site B"
        },
        {
          "field_a_term": "maximum entropy distribution under constraints",
          "field_b_term": "predicted species abundance distribution (SADs)",
          "note": "Harte (2011) MaxEnt ecology recovers empirical SADs from energy/abundance constraints"
        },
        {
          "field_a_term": "Boltzmann distribution (Gibbs ensemble)",
          "field_b_term": "species rank-abundance distribution in MaxEnt ecology"
        }
      ],
      "references": [
        {
          "note": "Shannon (1948) — A mathematical theory of communication; Bell Syst Tech J 27:379"
        },
        {
          "doi": "10.2307/1934352",
          "note": "Hill (1973) — Diversity and evenness; a unifying notation and its consequences; Ecology 54:427"
        },
        {
          "doi": "10.1146/annurev-ecolsys-120213-091540",
          "note": "Chao et al. (2014) — Rarefaction and extrapolation with Hill numbers; Annu Rev Ecol Evol Syst 45:297"
        },
        {
          "note": "Harte (2011) — Maximum Entropy and Ecology; Oxford University Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-information-theory/b-biodiversity-entropy-measures.yaml"
    },
    {
      "id": "b-vision-transformer-x-crop-stress-phenotyping",
      "title": "Vision transformer attention maps bridge long-range image-context modeling and field-scale crop stress phenotyping.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy (to be empirically validated): Transformer attention over multi-scale canopy imagery can act as a surrogate for agronomic context integration used to infer emergent crop stress patterns across heterogeneous fields.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-vit-based-phenotyping-improves-early-crop-stress-detection"
      ],
      "communication_gap": "Domain operators prioritize interpretable reliability diagnostics, while ML work often prioritizes aggregate accuracy without deployment-grade uncertainty audits.",
      "translation_table": [
        {
          "field_a_term": "model prior",
          "field_b_term": "domain prior",
          "note": "Both constrain inference in data-sparse regimes."
        },
        {
          "field_a_term": "uncertainty estimate",
          "field_b_term": "risk-aware decision support",
          "note": "Uncertainty quality determines practical utility."
        },
        {
          "field_a_term": "out-of-distribution behavior",
          "field_b_term": "deployment robustness",
          "note": "Shift sensitivity governs real-world reliability."
        }
      ],
      "references": [
        {
          "arxiv": "2010.11929",
          "note": "Vision Transformer architecture."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/ecology-machine-learning/b-vision-transformer-x-crop-stress-phenotyping.yaml"
    },
    {
      "id": "b-animal-migration-optimal-foraging-theory",
      "title": "Animal migration routes and stopover decisions are predicted by optimal foraging theory and dynamic programming, treating migration as an energy-budget optimization problem with the same mathematical structure as economic resource allocation.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Migration is an optimal control problem: a bird maximizes total fitness (arrival mass, breeding date) by choosing when to depart, which stopover sites to use, and how much fuel to carry, subject to predation and weather stochasticity. Stochastic dynamic programming (Bellman equation) solves this exa",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-energy-minimization-migration-route-selection"
      ],
      "communication_gap": "Ornithologists tracking migration and applied mathematicians solving optimal control problems rarely co-publish; the optimal migration theory literature (Alerstam, Houston) is mature but underused in conservation planning, where managers still rely on descriptive phenological data.\n",
      "translation_table": [
        {
          "field_a_term": "migratory bird departure decision (ecology)",
          "field_b_term": "stopping criterion in dynamic programming (mathematics)",
          "note": "Optimal departure time maximizes the value function V(fat reserve, date, location)"
        },
        {
          "field_a_term": "fuel deposition rate at stopover (ecology)",
          "field_b_term": "reward rate in marginal-value theorem (mathematics)",
          "note": "Birds should leave a stopover when instantaneous gain rate falls to the travel-averaged rate"
        },
        {
          "field_a_term": "carry-over effects between breeding and wintering (ecology)",
          "field_b_term": "state-dependent fitness landscape (mathematics)",
          "note": "Body condition is a state variable coupling seasons in the Bellman recursion"
        },
        {
          "field_a_term": "wind assistance / atmospheric corridor (ecology)",
          "field_b_term": "stochastic environment in Markov decision process (mathematics)",
          "note": "Wind fields enter as random transitions in the MDP state space"
        }
      ],
      "references": [
        {
          "doi": "10.1086/283741",
          "note": "Alerstam & Hedenstrom (1998) — review of optimal migration theory and dynamic programming"
        },
        {
          "doi": "10.1086/284375",
          "note": "Houston (1998) — marginal value theorem applied to migratory fueling decisions"
        },
        {
          "doi": "10.1890/07-0834.1",
          "note": "Bauer et al. (2011) — stochastic dynamic programming model of shorebird migration"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-mathematics/b-animal-migration-optimal-foraging-theory.yaml"
    },
    {
      "id": "b-chaos-population-cycles",
      "title": "The logistic map x_{n+1} = rx_n(1-x_n) exhibits period-doubling bifurcations to chaos at the Feigenbaum constant δ = 4.669..., which is universal across all 1D unimodal maps; real laboratory populations (Tribolium, Drosophila) undergo the same bifurcation cascade, establishing chaos theory as a mathematical framework for ecological population dynamics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "May (1976) showed that even simple 1D population models (logistic map x_{n+1} = rx_n(1-x_n)) exhibit period-doubling bifurcations to chaos as r increases past r_∞ ≈ 3.57. Chaotic population dynamics: sensitive dependence on initial conditions, strange attractors, positive Lyapunov exponents. Observe",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-logistic-map-feigenbaum-ecology-universality"
      ],
      "communication_gap": "May's 1976 Nature paper reached ecologists but Feigenbaum's 1978 mathematical universality result appeared in Journal of Statistical Physics and reached mainly physicists. The profound implication — that ecological and physical systems obey identical mathematical universality — was articulated by few and reached standard ecology curricula only slowly through the 1980s-90s.\n",
      "translation_table": [
        {
          "field_a_term": "logistic map parameter r (intrinsic growth rate)",
          "field_b_term": "r_max (maximum per-capita population growth rate)",
          "note": "same parameter in mathematical model and ecological interpretation"
        },
        {
          "field_a_term": "period-doubling bifurcation (at r≈3.0, 3.45, 3.54...)",
          "field_b_term": "transition from stable annual cycle to 2-year, 4-year, 8-year cycles",
          "note": "Canadian lynx may show 10-year cycles; rodent 4-year cycles may be near bifurcation"
        },
        {
          "field_a_term": "Feigenbaum constant δ = 4.669... (universal across maps)",
          "field_b_term": "ratio of successive bifurcation intervals in any density-regulated population",
          "note": "universality means ecological details don't change the mathematical structure"
        },
        {
          "field_a_term": "strange attractor (bounded, aperiodic, sensitive)",
          "field_b_term": "irregular population cycles that are deterministic, not stochastic",
          "note": "chaos explains irregular fluctuations without requiring environmental noise"
        },
        {
          "field_a_term": "Lyapunov exponent λ > 0 (exponential divergence of trajectories)",
          "field_b_term": "long-term unpredictability of population size despite deterministic model",
          "note": "ecological chaos has finite prediction horizon ≈ 1/λ generations"
        }
      ],
      "references": [
        {
          "doi": "10.1038/261459a0",
          "note": "May (1976) Nature 261:459 — simple mathematical models with complicated dynamics"
        },
        {
          "doi": "10.1007/BF01107909",
          "note": "Feigenbaum (1978) J Stat Phys 19:25 — universal metric properties in nonlinear transformations"
        },
        {
          "doi": "10.1126/science.275.5298.389",
          "note": "Costantino et al. (1997) Science 275:389 — chaotic dynamics in Tribolium"
        },
        {
          "note": "Ellner & Turchin (1995) Am Nat 145:343 — chaos in ecology: detecting it"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-mathematics/b-chaos-population-cycles.yaml"
    },
    {
      "id": "b-forest-gap-dynamics-x-neutral-theory-sampling",
      "title": "Disturbance-driven canopy gaps reset local competitive hierarchies and recruit colonists from a regional pool — paralleling Hubbell-style neutral sampling of equivalent individuals under fixed biodiversity number θ when dispersal limitation and stochastic recruitment dominate niche differentiation across gap-age ensembles.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Gap frequency-size distributions control local transient openness; neutral theory predicts abundance spectra via urn-like sampling when fitness differences are small relative to demographic stochasticity — gaps intensify equivalence by repeatedly fragmenting competitive monopolies, yet soil legacies",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-neutral-theta-estimates-converge-pre-post-gap-chronosequence"
      ],
      "communication_gap": "Forest-gap literature emphasizes photophysiology and succession tables while neutral theory papers emphasize allele-like symmetry assumptions — synthesis reviews exist but routine cross-citation in field protocols remains uneven.\n",
      "translation_table": [
        {
          "field_a_term": "gap formation rate and size distribution (windthrows, fire scars)",
          "field_b_term": "immigration probability m and θ controlling regional species turnover in neutral models",
          "note": "Both shape local lineage arrival processes; gap ecology adds explicit spatial wounds not present in mean-field neutral formulations."
        },
        {
          "field_a_term": "successional age since disturbance",
          "field_b_term": "effective generation time before drift-fixation balances immigration",
          "note": "Chronosequences map loosely onto composite migration–drift tradeoffs."
        },
        {
          "field_a_term": "dispersal kernels into gaps (distance decay)",
          "field_b_term": "dispersal limitation kernels in spatially explicit neutral models",
          "note": "Formal spatial neutral models exist; empirical gap datasets rarely fit them jointly."
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1066854",
          "note": "Hubbell (2001) — unified neutral theory of biodiversity and biogeography (Science)"
        },
        {
          "doi": "10.1086/285468",
          "note": "Brokaw (1985) — gap-phase regeneration structure in tropical forests (classic gap dynamics)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-mathematics/b-forest-gap-dynamics-x-neutral-theory-sampling.yaml"
    },
    {
      "id": "b-forest-succession-intermediate-disturbance",
      "title": "Forest succession following disturbance exhibits maximum species diversity at intermediate disturbance frequency and intensity (the Intermediate Disturbance Hypothesis), modeled as a nonlinear dynamical system where competitive exclusion reduces diversity at low disturbance and extinction increases it at high disturbance, with a diversity peak at the bifurcation boundary",
      "source_domain": "",
      "target_domain": "",
      "status": "contested",
      "bridge_claim": "Connell's (1978) Intermediate Disturbance Hypothesis (IDH) predicts a unimodal relationship between disturbance and diversity: at low disturbance, competitive exclusion reduces diversity to the competitive dominant; at high disturbance, extinction outpaces colonization; at intermediate levels, coexi",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Ecologists test the IDH empirically in forests, coral reefs, and grasslands with mixed results while dynamical systems mathematicians study competition-colonization tradeoffs in abstract models; the nonlinear dynamics framework for predicting when IDH holds versus fails (e.g., in non-symmetric communities) is underutilized in applied forest ecology and conservation management.",
      "translation_table": [
        {
          "field_a_term": "forest succession trajectory (ecology)",
          "field_b_term": "orbit in community composition phase space (nonlinear dynamics)",
          "note": "Post-disturbance succession is a trajectory from pioneer community to climax attractor; disturbance resets the initial condition"
        },
        {
          "field_a_term": "competitive exclusion (ecology)",
          "field_b_term": "convergence to single-species attractor under Lotka-Volterra competition (nonlinear dynamics)",
          "note": "Without disturbance, competitive dominant drives others to extinction — the competitive exclusion fixed point"
        },
        {
          "field_a_term": "disturbance frequency / return interval (ecology)",
          "field_b_term": "impulse frequency parameter in periodically forced dynamical system (nonlinear dynamics)",
          "note": "Low ω = rare disturbance (competitive exclusion dominates); high ω = frequent resets (extinction dominates); intermediate ω = maximum diversity"
        },
        {
          "field_a_term": "IDH diversity peak (ecology)",
          "field_b_term": "maximum of diversity measure as function of disturbance parameter (nonlinear dynamics)",
          "note": "Diversity maximum is a fold in the bifurcation diagram; can be detected by sensitivity of equilibrium to parameter changes"
        }
      ],
      "references": [
        {
          "doi": "10.1086/283246",
          "note": "Connell (1978) - diversity in tropical rain forests and coral reefs: original IDH"
        },
        {
          "doi": "10.1086/285949",
          "note": "Huston (1979) - general hypothesis of species diversity: theoretical model"
        },
        {
          "doi": "10.1890/08-2302.1",
          "note": "Fox (2013) - intermediate disturbance hypothesis should be abandoned: meta-analysis"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-mathematics/b-forest-succession-intermediate-disturbance.yaml"
    },
    {
      "id": "b-invasive-species-reaction-diffusion",
      "title": "Invasive species range expansion follows the Fisher-KPP reaction-diffusion equation: the asymptotic front speed c*=2√(rD) depends only on intrinsic growth rate r and diffusivity D",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The density u(x,t) of an invading species satisfies the Fisher-KPP PDE: ∂u/∂t = D·∂²u/∂x² + ru(1-u/K) where D is spatial diffusivity (km²/yr), r is intrinsic growth rate (yr⁻¹), and K is carrying capacity. For localized initial conditions, the solution develops into a traveling wave that propagates ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-invasive-species-reaction-diffusion"
      ],
      "communication_gap": "Ecologists who model invasive species range expansion use empirical curve-fitting (logistic, exponential growth to area) without typically applying the Fisher-KPP framework. The mathematical connection between dispersal kernels (from mark-recapture data) and diffusion coefficients requires a formal diffusion approximation step that is standard in physics but not in ecology curricula.\n",
      "translation_table": [
        {
          "field_a_term": "range expansion front of an invading species",
          "field_b_term": "traveling wave solution of the Fisher-KPP PDE",
          "note": "Wave speed c=2√(rD) predicted from parameters measurable at low density"
        },
        {
          "field_a_term": "intrinsic growth rate r (demographic)",
          "field_b_term": "reaction term slope f'(0) at zero density in the KPP condition",
          "note": "KPP condition: f(u)/u ≤ f'(0) for all u>0 ensures c*=2√(Df'(0))"
        },
        {
          "field_a_term": "dispersal diffusivity D (random walk variance/2t)",
          "field_b_term": "diffusion coefficient in Fick's second law",
          "note": "D estimated from mark-recapture displacement data: D = MSD/4t in 2D"
        },
        {
          "field_a_term": "Allee effect (per-capita growth negative below threshold)",
          "field_b_term": "bistable reaction term — violates KPP condition, produces pushed waves",
          "note": "Pushed waves with Allee effect have c > c* and are selected by initial condition"
        }
      ],
      "references": [
        {
          "doi": "10.1111/j.1469-1809.1937.tb02153.x",
          "note": "Fisher (1937) The wave of advance of advantageous genes. Ann Eugen 7:355"
        },
        {
          "doi": "10.1007/978-3-642-46466-3",
          "note": "Kolmogorov, Petrovsky & Piskunov (1937) Study of the diffusion equation with growth of the quantity of matter and its application to a biology problem"
        },
        {
          "doi": "10.1016/j.tree.2009.06.013",
          "note": "Hastings et al. (2005) The spatial spread of invasions — new developments in theory and evidence. Ecol Lett 8:91"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-mathematics/b-invasive-species-reaction-diffusion.yaml"
    },
    {
      "id": "b-landscape-ecology-graph-theory",
      "title": "Landscape ecology's analysis of habitat connectivity maps directly onto weighted graph theory, enabling circuit-theoretic gene flow prediction, least-cost corridor design, and percolation-theoretic thresholds for landscape connectivity collapse.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Landscape ecology studies how spatial heterogeneity affects ecological processes. Habitat patches become graph nodes; dispersal corridors become weighted edges where weights represent dispersal resistance (inverse of habitat quality, road mortality rate, etc.). Least-cost path algorithms (Dijkstra's",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-circuit-theory-outperforms-lcp-gene-flow-prediction"
      ],
      "communication_gap": "Landscape ecologists are trained in spatial statistics and GIS; graph theorists and circuit theory experts are mathematicians and physicists. The Circuitscape software (McRae) made circuit theory accessible to ecologists without mathematical training, but the underlying mathematical theory (random walks, spectral graph theory) is rarely taught in ecology graduate programs. Conservation planners who implement corridor projects often use the tools without understanding the graph-theoretic foundation, limiting their ability to extend methods to new problems.\n",
      "translation_table": [
        {
          "field_a_term": "habitat patch",
          "field_b_term": "graph node",
          "note": "Node attributes include patch area, quality, population size"
        },
        {
          "field_a_term": "dispersal corridor / matrix permeability",
          "field_b_term": "edge weight (resistance to dispersal)",
          "note": "Resistance surfaces combine land cover, roads, slope, human density"
        },
        {
          "field_a_term": "least-cost path",
          "field_b_term": "shortest path in weighted graph (Dijkstra's algorithm)",
          "note": "Standard in Circuitscape software for corridor planning"
        },
        {
          "field_a_term": "effective resistance between patches",
          "field_b_term": "random walk commute time / gene flow predictor",
          "note": "McRae (2006) proved equivalence between circuit resistance and random walk commute time"
        },
        {
          "field_a_term": "percolation threshold p_c ≈ 0.59",
          "field_b_term": "critical proportion of suitable habitat for landscape connectivity",
          "note": "With & Crist (1995) showed empirical confirmation of percolation threshold in arthropods"
        }
      ],
      "references": [
        {
          "doi": "10.1890/0012-9658(2001)082[1205:HAAPCE]2.0.CO;2",
          "note": "Urban & Keitt (2001) — Landscape connectivity: a graph-theoretic perspective, Ecology 82:1205"
        },
        {
          "doi": "10.1890/07-1861.1",
          "note": "McRae et al. (2008) — Using circuit theory to model connectivity in ecology, Ecology 89:2712"
        },
        {
          "doi": "10.2307/1941982",
          "note": "With & Crist (1995) — Critical thresholds in species responses to landscape structure, Ecology 76:2446"
        },
        {
          "doi": "10.2307/3544927",
          "note": "Taylor et al. (1993) — Connectivity is a vital element of landscape structure, Oikos 68:571"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-mathematics/b-landscape-ecology-graph-theory.yaml"
    },
    {
      "id": "b-metapopulation-dynamics-patch-theory",
      "title": "Levins' metapopulation model and Hanski's incidence function model connect island biogeography theory to dynamic landscape ecology, replacing the static species-area relationship with a mechanistic extinction-colonisation balance governed by the metapopulation capacity — the dominant eigenvalue of the landscape connectivity matrix.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "MacArthur & Wilson (1963, 1967) island biogeography: species number on an island S follows a species-area relationship S = cA^z (z ≈ 0.25 for oceanic islands). Species richness represents a dynamic equilibrium between immigration from mainland and local extinction. This was a static, non-mechanistic",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-metapopulation-capacity-climate-refugia-network"
      ],
      "communication_gap": "Island biogeography (MacArthur & Wilson) was published in Ecology and as a monograph — accessible to ecologists. Levins (1969) published in Bulletin of the Entomological Society of America — an obscure journal that delayed uptake by a decade. Hanski's work (Finnish Academy, published in Nature, American Naturalist, and Oikos) brought metapopulation theory to mainstream ecology but required 20+ years of long-term empirical data to convince sceptical ecologists. The metapopulation capacity eigenvalue formulation is mathematically advanced (linear algebra of large sparse matrices) and uncommon in ecological training. Conservation practitioners rarely perform eigenvalue calculations and rely on surrogate metrics (area, buffer zones) that MCA shows are inferior to λ_M.\n",
      "translation_table": [
        {
          "field_a_term": "habitat patch i (area, quality, location)",
          "field_b_term": "node in a graph (with weight = area)"
        },
        {
          "field_a_term": "colonisation rate cᵢⱼ (dispersal from j to i)",
          "field_b_term": "edge weight in dispersal graph"
        },
        {
          "field_a_term": "local extinction rate eᵢ (per patch)",
          "field_b_term": "decay rate of node occupancy"
        },
        {
          "field_a_term": "metapopulation capacity λ_M (dominant eigenvalue)",
          "field_b_term": "spectral radius of the colonisation matrix (Perron-Frobenius eigenvalue)"
        },
        {
          "field_a_term": "persistence threshold λ_M > e/c",
          "field_b_term": "R₀ > 1 threshold in epidemiology (SIR model)"
        },
        {
          "field_a_term": "extinction debt (committed but delayed extinction)",
          "field_b_term": "transient behaviour after parameter shift below threshold"
        },
        {
          "field_a_term": "species-area relationship (static, MacArthur-Wilson)",
          "field_b_term": "equilibrium of metapopulation at θ = 0 dispersal (degenerate case)"
        }
      ],
      "references": [
        {
          "note": "Levins (1969) Bull Entomol Soc Am 15:237 — metapopulation model"
        },
        {
          "doi": "10.1086/285744",
          "note": "Hanski (1994) Am Nat 144:646 — incidence function model"
        },
        {
          "note": "Hanski (1999) Metapopulation Ecology. Oxford University Press"
        },
        {
          "doi": "10.1086/303056",
          "note": "Ovaskainen & Hanski (2001) Am Nat 158:538 — metapopulation capacity"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-mathematics/b-metapopulation-dynamics-patch-theory.yaml"
    },
    {
      "id": "b-neutral-theory-random-matrix",
      "title": "Hubbell's neutral theory of biodiversity treats species as statistically equivalent; May (1972) showed random ecosystems become unstable above a complexity threshold — both results are applications of random matrix theory (Wigner's semicircle law) to community ecology.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Two mathematical results from random matrix theory (RMT) have profoundly shaped ecology, with implications that are still being worked out:\n1. MAY'S STABILITY CRITERION (1972):\n   For a community of S species with random interaction matrix A (entries\n   drawn from N(0, σ²)), the community equilibriu",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-may-stability-real-ecosystem-applicability"
      ],
      "communication_gap": "Ecologists rarely read random matrix theory literature (published in pure mathematics and mathematical physics journals); RMT theorists are rarely aware of ecology applications. May's 1972 Nature paper created the connection, but the full RMT toolkit (free probability, structured ensembles, finite-size corrections) has barely been applied to ecological systems. Allesina & Tang (2012) was a significant step; follow-up work is sparse relative to the mathematical opportunity.\n",
      "translation_table": [
        {
          "field_a_term": "Wigner semicircle law",
          "field_b_term": "May's stability criterion for random ecosystems",
          "note": "The spectral radius σ√(SC) from the semicircle law directly gives May's instability threshold"
        },
        {
          "field_a_term": "random matrix ensemble (Wigner GOE/GUE)",
          "field_b_term": "community interaction matrix with random strengths",
          "note": "May (1972) used a Gaussian random matrix; Allesina & Tang used structured RMT"
        },
        {
          "field_a_term": "eigenvalue distribution",
          "field_b_term": "ecological stability spectrum",
          "note": "Stability requires all eigenvalues to have negative real parts; RMT gives the distribution"
        },
        {
          "field_a_term": "random walk on simplex",
          "field_b_term": "neutral theory ecological drift",
          "note": "Hubbell's neutral model is mathematically a multi-dimensional random walk on the abundance simplex"
        },
        {
          "field_a_term": "log-series / Poisson-lognormal distribution",
          "field_b_term": "species abundance distribution (SAD)",
          "note": "Neutral theory predicts these specific SADs; observed deviations quantify niche structure"
        }
      ],
      "references": [
        {
          "note": "Hubbell, S.P. (2001). The Unified Neutral Theory of Biodiversity and Biogeography. Princeton University Press. -- The neutral theory of biodiversity"
        },
        {
          "doi": "10.1038/238413a0",
          "note": "May, R.M. (1972). Will a large complex system be stable? Nature 238:413–414."
        },
        {
          "doi": "10.1038/nature10832",
          "note": "Allesina, S. & Tang, S. (2012). Stability criteria for complex ecosystems. Nature 483:205–208."
        },
        {
          "note": "Wigner, E.P. (1958). On the distribution of the roots of certain symmetric matrices. Ann. Math. 67:325–327. -- Original Wigner semicircle law"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-mathematics/b-neutral-theory-random-matrix.yaml"
    },
    {
      "id": "b-phylogeography-coalescent-theory",
      "title": "The coalescent (Kingman 1982) bridges ecology and mathematics by providing a probabilistic framework for tracing gene genealogies backward in time — enabling phylogeography to reconstruct population histories, out-of-Africa migration, and species range shifts from genetic data.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Kingman's coalescent (1982) describes the stochastic process by which genetic lineages trace back to common ancestors. For a sample of n sequences, the rate of coalescence of the last pair from k lineages is C(k,2)/N_e where N_e is the effective population size. Expected time between k and k-1 linea",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-lgm-refugia-predict-phylogeographic-breaks-globally"
      ],
      "communication_gap": "Mathematical population geneticists who develop coalescent theory rarely engage with empirical ecologists who do species distribution modeling or biogeography. Phylogeographers use coalescent methods operationally (BEAST, IMa, msprime) without engaging with the mathematical derivations. Conservation biologists who identify management units for endangered species rarely use formal coalescent inference for defining evolutionarily significant units.\n",
      "translation_table": [
        {
          "field_a_term": "coalescent rate C(k,2)/N_e",
          "field_b_term": "ecological analog: contact rate between k lineages in population of size N_e",
          "note": "like predator-prey contact rate; N_e parameterizes the stochastic process"
        },
        {
          "field_a_term": "effective population size N_e",
          "field_b_term": "ecological carrying capacity under genetic drift (not census size)",
          "note": "N_e ≈ 4/3 × census size for sexual diploids; bottlenecks dramatically reduce N_e"
        },
        {
          "field_a_term": "time to MRCA (E[T_MRCA] ≈ 2N_e)",
          "field_b_term": "coalescent depth measures historical population structure and bottlenecks",
          "note": "deeply diverged gene trees (large T_MRCA) indicate large ancestral N_e or subdivision"
        },
        {
          "field_a_term": "phylogeographic break",
          "field_b_term": "geographic discontinuity in gene tree topology correlating with landscape barriers",
          "note": "mountain ranges, rivers, and climate barriers leave signatures in gene genealogies"
        },
        {
          "field_a_term": "isolation-with-migration (IM) model",
          "field_b_term": "structured coalescent with post-divergence gene flow parameters",
          "note": "estimates when two populations split and how much gene flow occurred afterward"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0304-4149(82)90011-4",
          "note": "Kingman (1982) The coalescent; Stoch Proc Appl 13:235"
        },
        {
          "doi": "10.1146/annurev.es.18.110187.002421",
          "note": "Avise et al. (1987) Intraspecific phylogeography — the mitochondrial DNA bridge between population genetics and systematics; Annu Rev Ecol Syst 18:489"
        },
        {
          "doi": "10.1534/genetics.103.024182",
          "note": "Hey & Nielsen (2004) Multilocus methods for estimating population sizes, migration rates and divergence time; Genetics 167:395"
        },
        {
          "doi": "10.1038/325031a0",
          "note": "Cann et al. (1987) Mitochondrial DNA and human evolution; Nature 325:31"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-mathematics/b-phylogeography-coalescent-theory.yaml"
    },
    {
      "id": "b-predator-prey-hopf-bifurcation",
      "title": "The Lotka-Volterra predator-prey equations undergo a Hopf bifurcation as carrying capacity increases, generating stable limit-cycle oscillations whose period and amplitude are analytically predictable from the Jacobian eigenvalues at the coexistence equilibrium",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "In the Rosenzweig-MacArthur model with prey carrying capacity K, the coexistence equilibrium undergoes a supercritical Hopf bifurcation at a critical K* where Re(lambda) = 0, predicting the paradox of enrichment: increasing productivity destabilizes the ecosystem and produces cycles of period T = 2*",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Ecologists fit population cycle data empirically while dynamical systems mathematicians analyze bifurcation diagrams; the explicit calculation of Hopf bifurcation parameters from measured ecological rates is rarely performed in the field ecology literature.",
      "translation_table": [
        {
          "field_a_term": "predator-prey coexistence equilibrium (N*, P*)",
          "field_b_term": "fixed point of the dynamical system where dN/dt = dP/dt = 0",
          "note": "Stability of fixed point determined by eigenvalues of the Jacobian J evaluated at (N*, P*)"
        },
        {
          "field_a_term": "paradox of enrichment",
          "field_b_term": "supercritical Hopf bifurcation at K*",
          "note": "Increasing K shifts Re(lambda) from negative (stable) to positive (unstable limit cycle); period T = 2pi/|Im(lambda)|"
        },
        {
          "field_a_term": "predator-prey cycle period (e.g., 9-11 yr Canadian lynx/hare cycle)",
          "field_b_term": "Im(lambda) at the Hopf point",
          "note": "Hare-lynx cycle period ~ 10 yr consistent with Im(lambda) ~ 0.6 yr^-1 in fitted RM models"
        },
        {
          "field_a_term": "functional response (Holling type II)",
          "field_b_term": "nonlinear saturation term a*N/(1 + a*h*N)",
          "note": "Saturating functional response is required for the Hopf bifurcation; linear functional response gives neutrally stable cycles only"
        }
      ],
      "references": [
        {
          "doi": "10.1086/282272",
          "note": "Rosenzweig & MacArthur (1963) Am Nat - Rosenzweig-MacArthur model and paradox of enrichment"
        },
        {
          "doi": "10.1126/science.171.3969.385",
          "note": "Rosenzweig (1971) Science - paradox of enrichment: destabilization of exploitation ecosystems"
        },
        {
          "doi": "10.2307/1940352",
          "note": "May (1972) Am Nat - stability and complexity in model ecosystems, bifurcation analysis"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-mathematics/b-predator-prey-hopf-bifurcation.yaml"
    },
    {
      "id": "b-predator-prey-lotka-volterra-hamiltonian",
      "title": "The Lotka-Volterra predator-prey equations possess a conserved Hamiltonian H(x,y) = alpha*ln(y) - beta*y + gamma*ln(x) - delta*x, making predator-prey cycles mathematically equivalent to Hamiltonian mechanics, and the prey–predator ratio a conserved action variable that constrains long-term ecological dynamics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Lotka-Volterra equations dx/dt = ax - bxy (prey), dy/dt = -cy + dxy (predator) admit the conserved quantity H = d*x - c*ln(x) + b*y - a*ln(y). This is a Hamiltonian system: the equations are Hamilton's equations with H as the Hamiltonian and (ln x, ln y) as conjugate phase-space coordinates. Eco",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Ecologists use Lotka-Volterra as a qualitative model and fit parameters phenomenologically without exploiting the Hamiltonian conservation law; mathematical physicists familiar with Hamiltonian mechanics rarely apply it to ecological dynamics. The fragility of Hamiltonian structure under ecological perturbations is rarely discussed in ecology courses.\n",
      "translation_table": [
        {
          "field_a_term": "conserved Hamiltonian H (mathematics)",
          "field_b_term": "ecological invariant determining predator-prey cycle amplitude (ecology)",
          "note": "H = d*x - c*ln(x) + b*y - a*ln(y); constant on each orbit; set by initial conditions"
        },
        {
          "field_a_term": "closed orbits in phase space (mathematics)",
          "field_b_term": "periodic predator-prey population cycles (ecology)",
          "note": "Lotka-Volterra cycles are closed (neutrally stable); no convergence to steady state"
        },
        {
          "field_a_term": "canonical transformation (mathematics)",
          "field_b_term": "coordinate change to (ln x, ln y) that makes equations Hamiltonian (ecology)",
          "note": "ln-transformation reveals Hamiltonian structure; standard ecological models rarely use this"
        },
        {
          "field_a_term": "action-angle variables (mathematics)",
          "field_b_term": "cycle period and amplitude as conserved quantities (ecology)",
          "note": "In the Hamiltonian formulation, cycle amplitude is the action variable"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rsif.2013.0572",
          "note": "Hamiltonian structure of Lotka-Volterra equations; mathematical analysis"
        },
        {
          "doi": "10.2307/4100",
          "note": "Lotka (1925) - Elements of Physical Biology; original predator-prey equations"
        },
        {
          "doi": "10.1016/0022-5193(73)90121-7",
          "note": "May (1973) - Stability and Complexity in Model Ecosystems; Lotka-Volterra analysis"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-mathematics/b-predator-prey-lotka-volterra-hamiltonian.yaml"
    },
    {
      "id": "b-reaction-diffusion-spatial-ecology",
      "title": "Spatial patterns in ecology (animal coat markings, vegetation bands, predator-prey patches) emerge from Turing reaction-diffusion instabilities, mapping ecological population dynamics onto the mathematics of activator-inhibitor systems.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Turing's 1952 reaction-diffusion mechanism, in which a slowly diffusing activator and a rapidly diffusing inhibitor produce spontaneous spatial pattern from uniform conditions, maps directly onto spatial ecology: prey species (activator) and predator or resource competition (inhibitor) with differen",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-vegetation-stripe-turing-instability"
      ],
      "communication_gap": "Mathematicians study reaction-diffusion equations analytically while ecologists model spatial dynamics empirically; Turing patterns in ecology were only seriously proposed in the 1990s (Levin & Segel 1976 was ahead of its time) and the quantitative link between diffusion parameters and observed pattern wavelengths is rarely tested.\n",
      "translation_table": [
        {
          "field_a_term": "activator concentration u(x,t) (reaction-diffusion)",
          "field_b_term": "prey or plant biomass density (ecology)",
          "note": "Fast-growing, locally self-enhancing species plays the activator role"
        },
        {
          "field_a_term": "inhibitor concentration v(x,t) (reaction-diffusion)",
          "field_b_term": "predator density or soil water (ecology)",
          "note": "Slowly-reproducing, long-range suppressor plays the inhibitor role"
        },
        {
          "field_a_term": "Turing instability condition d > (b+d)^2/4ac (mathematics)",
          "field_b_term": "diffusion-driven instability in predator-prey systems (ecology)",
          "note": "Pattern emerges when inhibitor diffuses sufficiently faster than activator"
        },
        {
          "field_a_term": "pattern wavelength lambda proportional to sqrt(D/sigma)",
          "field_b_term": "spatial periodicity of vegetation stripes or animal coats",
          "note": "Observed stripe spacing in tiger bush vegetation matches Turing wavelength predictions"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rstb.1952.0012",
          "note": "Turing (1952) - chemical basis of morphogenesis, original reaction-diffusion theory"
        },
        {
          "doi": "10.1038/259659a0",
          "note": "Levin & Segel (1976) - hypothesis for origin of plankton patchiness via diffusive instability"
        },
        {
          "doi": "10.1038/35078232",
          "note": "Klausmeier (1999) - regular and irregular patterns in semiarid vegetation"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-mathematics/b-reaction-diffusion-spatial-ecology.yaml"
    },
    {
      "id": "b-replicator-dynamics-x-evolutionarily-stable-strategy-field-data",
      "title": "Replicator dynamics models bridge evolutionary game theory with empirical ecology by predicting frequency-dependent trait shifts under competition.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Established mathematical framework links ESS conditions to rest points of replicator ODEs on strategy simplices; speculative analogy for field inference—finite-sample ecological time series rarely satisfy mean-field assumptions, so ESS predictions require explicit observation models and environmenta",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-replicator-residual-tests-improve-ess-prediction-under-competition"
      ],
      "communication_gap": "Mathematical ecology emphasizes equilibrium classification while field programs emphasize trait plasticity and climate forcing omitted from baseline replicator models.",
      "translation_table": [
        {
          "field_a_term": "strategy frequencies",
          "field_b_term": "phenotype abundances / morph frequencies",
          "note": "Discrete sampling biases inference."
        },
        {
          "field_a_term": "payoff matrix entries",
          "field_b_term": "fitness proxies from demographic rates",
          "note": "Fitness estimation error dominates model risk."
        },
        {
          "field_a_term": "interior fixed points",
          "field_b_term": "coexistence regimes",
          "note": "Stability classification depends on neglected spatial structure."
        }
      ],
      "references": [
        {
          "doi": "10.1016/0025-5564(78)90077-9",
          "note": "Evolutionarily stable strategies and game dynamics (Taylor–Jonker; replicator dynamics lineage)."
        }
      ],
      "last_reviewed": "2026-05-09",
      "file": "cross-domain/ecology-mathematics/b-replicator-dynamics-x-evolutionarily-stable-strategy-field-data.yaml"
    },
    {
      "id": "b-stochastic-population-extinction",
      "title": "The stochastic logistic model — adding demographic stochasticity (Brownian noise ∝ population size) to the deterministic logistic equation — yields a mean extinction time exponential in carrying capacity K, formalising the minimum viable population concept and underpinning IUCN Red List extinction risk categories through the mathematics of quasi-stationary distributions and Fokker-Planck diffusion.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The deterministic logistic model dN/dt = rN(1-N/K) has a stable equilibrium at N=K. In a finite population, demographic stochasticity — random variation in individual birth and death events — drives fluctuations around K and ultimately causes extinction. The stochastic logistic model is:\n  dN = rN(1",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-extinction-time-exponential-k-demographic-stochasticity-confirmed"
      ],
      "communication_gap": "The stochastic diffusion approach to extinction (Lande, Kimura) is published in ecology and population genetics journals respectively, with limited cross-citation to the stochastic processes literature (Gardiner's Handbook, Van Kampen's Stochastic Processes). Conservation biologists using PVA software (VORTEX, RAMAS) implement these models without knowledge of the underlying mathematical theory. Mathematicians developing new results on quasi-stationary distributions rarely cite ecology literature.\n",
      "translation_table": [
        {
          "field_a_term": "Carrying capacity K (ecology)",
          "field_b_term": "Potential well depth in the extinction barrier — determines T_ext",
          "note": "T_ext ≈ exp(2rK/σ²)/r — small changes in K cause huge changes in persistence"
        },
        {
          "field_a_term": "Intrinsic growth rate r (population ecology)",
          "field_b_term": "Inverse of characteristic demographic time — sets timescale of fluctuations",
          "note": "High-r species recover faster from low abundance but same extinction mathematics applies"
        },
        {
          "field_a_term": "Demographic stochasticity σ² (individual birth/death variance)",
          "field_b_term": "Diffusion coefficient in Fokker-Planck equation B(N) = σ²N²",
          "note": "Demographic stochasticity scales as N² in population-level variance"
        },
        {
          "field_a_term": "Quasi-stationary distribution (QSD) of population size",
          "field_b_term": "Long-run population size distribution conditional on survival",
          "note": "The QSD characterises populations that persist for long times"
        },
        {
          "field_a_term": "Minimum viable population (MVP)",
          "field_b_term": "Threshold K below which T_ext < conservation time horizon",
          "note": "MVP is the inflection point in the exp(K) relationship"
        },
        {
          "field_a_term": "Extinction debt (delayed extinction from habitat loss)",
          "field_b_term": "Subcritical relaxation: population below new K but T_ext still long",
          "note": "The time lag between habitat loss and extinction can be centuries for large K"
        }
      ],
      "references": [
        {
          "doi": "10.1086/285580",
          "note": "Lande (1993) Risks of population extinction from demographic and environmental stochasticity and random catastrophes, Am Nat 142:911 — derives T_ext ≈ exp(2rK/σ²)/r for stochastic logistic model\n"
        },
        {
          "doi": "10.1016/j.tree.2010.05.008",
          "note": "Ovaskainen & Meerson (2010) Stochastic models of population extinction, Trends Ecol Evol 25:643 — reviews mathematical approaches to extinction\n"
        },
        {
          "doi": "10.1038/nature07265",
          "note": "Melbourne & Hastings (2008) Extinction risk depends strongly on factors contributing to stochasticity, Nature 454:100 — experimental test with beetles\n"
        },
        {
          "doi": "10.2307/1308256",
          "note": "Shaffer (1981) Minimum population sizes for species conservation, BioScience 31:131 — introduces MVP concept in conservation biology\n"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-mathematics/b-stochastic-population-extinction.yaml"
    },
    {
      "id": "b-stochastic-population-master-equation",
      "title": "Stochastic population dynamics and the master equation — birth-death processes connect population ecology to statistical physics through shared probability flow mathematics",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Deterministic population models (Lotka-Volterra, logistic) break down at small population sizes where demographic stochasticity dominates. The master equation governs probability flow: dP(n,t)/dt = Σ [T(n|n')P(n',t) - T(n'|n)P(n,t)] where T(n|n') is the transition rate from state n' to n and P(n,t) ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-extinction-debt-master-equation-prediction"
      ],
      "communication_gap": "Population ecologists and evolutionary biologists largely work with differential equation models; the master equation formalism is standard in statistical physics but uncommon in ecology courses. Gardiner's Handbook of Stochastic Methods is the bridge text but is rarely cited in ecology. The connection between genetic drift and Brownian motion was recognized by Kimura (1964) but the broader master equation framework is underused in quantitative ecology.\n",
      "translation_table": [
        {
          "field_a_term": "master equation (probability flow over population states)",
          "field_b_term": "Kolmogorov forward equation (stochastic physics)",
          "note": "Identical PDE for probability density; difference is only in interpretation of states"
        },
        {
          "field_a_term": "birth rate b(n)",
          "field_b_term": "upward transition rate in physical Markov chain",
          "note": "Linear birth: b(n)=bn gives Poisson steady state; nonlinear b modifies distribution"
        },
        {
          "field_a_term": "demographic stochasticity (N finite effects)",
          "field_b_term": "shot noise in physical systems",
          "note": "Both scale as 1/√N; suppressed in thermodynamic limit but crucial at small N"
        },
        {
          "field_a_term": "extinction probability (first passage to n=0)",
          "field_b_term": "absorption probability in Markov chain with absorbing state",
          "note": "Exact formula: P_ext = (d/b)^N for simple birth-death process"
        },
        {
          "field_a_term": "genetic drift (Wright-Fisher diffusion)",
          "field_b_term": "Brownian motion in allele frequency space [0,1]",
          "note": "Diffusion coefficient D = p(1-p)/2N; same as physical diffusion in bounded domain"
        },
        {
          "field_a_term": "quasi-stationary distribution (conditional on non-extinction)",
          "field_b_term": "quasi-stationary state in metastable physical systems",
          "note": "Determines effective population size and evolutionary dynamics before extinction"
        }
      ],
      "references": [
        {
          "note": "Bartlett (1960) Stochastic Population Models (Methuen, ISBN 0306434512) — foundational ecological text"
        },
        {
          "note": "Gardiner (2004) Handbook of Stochastic Methods (Springer, ISBN 3540208828) — comprehensive physics treatment"
        },
        {
          "doi": "10.1093/acprof:oso/9780198525257.001.0001",
          "note": "Lande et al. (2003) Stochastic Population Dynamics in Ecology and Conservation"
        },
        {
          "note": "van Kampen (2007) Stochastic Processes in Physics and Chemistry (Elsevier, ISBN 0444893490) — definitive reference"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-mathematics/b-stochastic-population-master-equation.yaml"
    },
    {
      "id": "b-vegetation-patterns-klausmeier-model",
      "title": "Regular spatial patterns in dryland vegetation (bands, spots, labyrinths) arise from a Turing instability in a reaction-diffusion PDE system where plant biomass activates water infiltration locally while water diffuses faster than plants, as described by the Klausmeier model ∂u/∂t = u^2*v - mu + d*∂^2u/∂x^2 and ∂v/∂t = a - v - u^2*v + ∂^2v/∂x^2",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Klausmeier (1999) showed that vegetation-water feedbacks produce a reaction-diffusion system exhibiting Turing instability: plants (u) use water (v) and enhance local infiltration (positive feedback), while water diffuses faster than plants (D_v >> D_u); linear stability analysis shows the uniform s",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Ecologists observe vegetation patterns in remote sensing data and attribute them to grazing or hydrology while mathematicians study reaction-diffusion pattern formation; the Klausmeier model directly bridges both but is taught primarily in theoretical ecology and mathematical biology, rarely connecting to remote sensing or land management.",
      "translation_table": [
        {
          "field_a_term": "vegetation biomass density u(x,t) (ecology)",
          "field_b_term": "activator species in Turing reaction-diffusion system (mathematics)",
          "note": "Plants are the slow-diffusing activator; local plant density enhances water infiltration (self-activation)"
        },
        {
          "field_a_term": "soil water content v(x,t) (ecology)",
          "field_b_term": "inhibitor species in Turing RD system (mathematics)",
          "note": "Water is the fast-diffusing inhibitor; consumed by plants, depleted in vegetated patches"
        },
        {
          "field_a_term": "rainfall parameter a (ecology)",
          "field_b_term": "control parameter driving the Turing bifurcation (mathematics)",
          "note": "Decreasing a (drought) shifts the system through the Turing instability threshold a_c"
        },
        {
          "field_a_term": "vegetation band migration upslope in arid regions (ecology)",
          "field_b_term": "traveling wave solution of the Klausmeier PDE on an incline (mathematics)",
          "note": "Advection term in water equation creates traveling band patterns moving against the slope"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.284.5421.1826",
          "note": "Klausmeier (1999) Science - Regular stripes in arid vegetation: Turing mechanism in ecology"
        },
        {
          "doi": "10.1890/ES14-00305.1",
          "note": "Rietkerk & van de Koppel (2008) - regular pattern formation in real ecosystems"
        },
        {
          "doi": "10.1007/s11538-006-9138-5",
          "note": "van der Stelt et al. (2013) - stability analysis of Klausmeier vegetation model"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-mathematics/b-vegetation-patterns-klausmeier-model.yaml"
    },
    {
      "id": "b-food-webs-cascade-dynamics",
      "title": "Ecological food webs as directed networks — trophic cascade dynamics as network percolation",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Ecological food webs are directed weighted networks where nodes are species and edges represent trophic interactions (energy flow from prey to predator). Network structural properties predict ecosystem stability and cascade dynamics. Connectance C = L/S² (number of links L over species² S²) is the n",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-keystone-species-betweenness-predicts-cascade-risk"
      ],
      "communication_gap": "May (1973) introduced random graph theory to ecology in a landmark book, but most ecologists remained unfamiliar with network science until the early 2000s when Dunne, Williams and Martinez explicitly mapped food webs onto complex network theory. Financial network contagion modelling (Eisenberg-Noe 2001) developed independently with no awareness of the food web cascade literature. Both fields have since rediscovered each other's concepts but still rarely co-publish.\n",
      "translation_table": [
        {
          "field_a_term": "species (node)",
          "field_b_term": "node in food web network",
          "note": "Each species is a node; biomass and body size determine node 'weight'"
        },
        {
          "field_a_term": "trophic interaction (directed edge, prey → predator)",
          "field_b_term": "energy flow / biomass transfer rate (edge weight)",
          "note": "Directed edges from resource to consumer; weights proportional to energy transfer efficiency"
        },
        {
          "field_a_term": "connectance C = L/S²",
          "field_b_term": "network density / edge probability",
          "note": "May (1972): stability decreases with increasing connectance — analogous to percolation threshold"
        },
        {
          "field_a_term": "keystone species (high betweenness centrality)",
          "field_b_term": "hub node / articulation point",
          "note": "Removal disconnects major portions of the network; classic example: sea otters (Paine 1966)"
        },
        {
          "field_a_term": "trophic cascade (secondary extinction wave)",
          "field_b_term": "cascade failure / contagion propagation",
          "note": "Identical dynamics to cascade failures in power grids and financial networks"
        },
        {
          "field_a_term": "robustness to species loss",
          "field_b_term": "network resilience to node deletion",
          "note": "Dunne et al.: targeted hub deletion mimics Barabási-Albert robustness analysis exactly"
        }
      ],
      "references": [
        {
          "note": "May (1973) Stability and Complexity in Model Ecosystems. Princeton University Press.",
          "url": "https://press.princeton.edu/books/paperback/9780691088617/stability-and-complexity-in-model-ecosystems"
        },
        {
          "note": "Paine (1966) Am Nat 100:65 — keystone predator concept (sea otters and urchins)",
          "doi": "10.1086/282400"
        },
        {
          "doi": "10.1073/pnas.192407699",
          "note": "Dunne et al. (2002) PNAS 99:12917 — food web robustness via network theory"
        },
        {
          "doi": "10.1038/nature04927",
          "note": "Montoya et al. (2006) Nature 442:259 — ecological networks and cascade dynamics"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-network-science/b-food-webs-cascade-dynamics.yaml"
    },
    {
      "id": "b-kelp-forest-trophic-cascade-amplification",
      "title": "Kelp forest trophic cascades — where sea otter removal triggers urchin population explosions that overgraze kelp — are network-theoretic cascade failures with amplification coefficients predictable from the interaction network's eigenvalue structure, making marine trophic dynamics a natural experiment in structured network fragility.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The classical kelp forest trophic cascade (Paine 1969; Estes & Palmisano 1974) demonstrates that removing a keystone predator (sea otter) can cause catastrophic regime shifts through indirect effects: otters eat urchins, urchins eat kelp, so otter removal → urchin explosion → kelp deforestation → bi",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "",
      "translation_table": [
        {
          "field_a_term": "dominant eigenvalue lambda_max of community matrix",
          "field_b_term": "ecosystem stability threshold (May stability criterion)",
          "note": "lambda_max < 0 for stable community; removal of keystone increases lambda_max"
        },
        {
          "field_a_term": "network operator norm (resolvent)",
          "field_b_term": "cascade amplification factor",
          "note": "How much a trophic perturbation is amplified before it decays — determines cascade severity"
        },
        {
          "field_a_term": "hub node (high betweenness centrality)",
          "field_b_term": "keystone species (disproportionate ecosystem impact per unit biomass)",
          "note": "Both are defined by their network position, not their abundance"
        },
        {
          "field_a_term": "network percolation (node removal)",
          "field_b_term": "species extirpation and secondary extinctions",
          "note": "The cascade of secondary extinctions follows percolation theory on the food web"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.185.4156.1058",
          "note": "Estes & Palmisano (1974) — sea otters as keystone species in kelp forest communities"
        },
        {
          "doi": "10.1038/238413a0",
          "note": "May (1972) — will a large complex system be stable? (random matrix threshold)"
        },
        {
          "doi": "10.1038/ncomms12141",
          "note": "Rohr et al. (2014) — structural and functional heterogeneity of food web interaction strengths"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-network-science/b-kelp-forest-trophic-cascade-amplification.yaml"
    },
    {
      "id": "b-mutualistic-nestedness-robustness",
      "title": "Mutualistic ecological networks (plant-pollinator, plant-seed disperser) exhibit nested architecture—where specialists interact only with subsets of generalists' partners—and this nestedness maximizes robustness to species extinction, quantified by the nestedness temperature T = 100*(1 - NODF/100) and linked to network connectivity through spectral theory",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Nestedness in mutualistic networks arises from a core-periphery structure where the adjacency matrix A approaches a triangular/packed form; the nestedness metric NODF (Nestedness based on Overlap and Decreasing Fill) quantifies this as the fraction of paired rows/columns satisfying subset ordering; ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Ecologists measure nestedness in field-collected plant-pollinator interaction matrices while network scientists develop structural metrics and robustness models in abstract graphs; the ecological insight that nestedness reflects coevolutionary assembly rules is rarely integrated with algebraic spectral analysis of the adjacency matrix.",
      "translation_table": [
        {
          "field_a_term": "nestedness of mutualistic network (ecology)",
          "field_b_term": "proximity of adjacency matrix to upper-triangular form (network science)",
          "note": "NODF = (1/N_pairs) * Σ min(k_i,k_j)/max(k_i,k_j) * (shared partners / min(k_i,k_j))"
        },
        {
          "field_a_term": "specialist-generalist asymmetry (ecology)",
          "field_b_term": "heterogeneous degree distribution with core-periphery structure (network science)",
          "note": "High-degree generalists form the core; specialists on the periphery preferentially interact with core nodes"
        },
        {
          "field_a_term": "robustness to secondary extinctions (ecology)",
          "field_b_term": "giant component persistence under targeted degree-based node removal (network science)",
          "note": "Nested networks are robust to random extinction but vulnerable to loss of generalists (hubs)"
        },
        {
          "field_a_term": "mutualistic strength asymmetry (ecology)",
          "field_b_term": "strength-degree correlation in weighted bipartite network (network science)",
          "note": "Mutualistic dependence coefficients are asymmetric; weak dependence of generalists on specialists maintains stability"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature09394",
          "note": "Bastolla et al. (2009) Nature - nestedness minimizes competition and maximizes biodiversity in mutualistic networks"
        },
        {
          "doi": "10.1126/science.1073381",
          "note": "Bascompte et al. (2003) Science - nested structure of plant-animal mutualistic networks"
        },
        {
          "doi": "10.1111/j.1600-0706.2010.18160.x",
          "note": "Almeida-Neto et al. (2008) - NODF: new and unambiguous measure of nestedness"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-network-science/b-mutualistic-nestedness-robustness.yaml"
    },
    {
      "id": "b-mutualistic-networks-nestedness",
      "title": "Plant-pollinator and plant-seed disperser mutualistic networks exhibit characteristic nested architecture where specialists interact with subsets of generalist partners; this nestedness property, quantified identically in ecology and economic complexity networks, predicts robustness to extinction cascades and emerges from maximum entropy constraints on bipartite graphs.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Plant-pollinator and plant-seed disperser networks are bipartite mutualistic networks with characteristic nested structure: specialists interact with subsets of what generalists interact with. Nestedness N = (observed - expected overlaps) / (max - expected). Highly nested networks are more robust to",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-nestedness-robustness-degree-heterogeneity-mediation"
      ],
      "communication_gap": "Ecologists and economists independently developed nestedness theory in the 1990s-2000s with no cross-citation. Bascompte's ecological nestedness work and Hidalgo's economic complexity work use mathematically identical bipartite network measures but appear in Nature/Science vs Journal of Economic Geography with no common reference community.\n",
      "translation_table": [
        {
          "field_a_term": "plant species (rows of bipartite matrix)",
          "field_b_term": "countries (rows of country-product matrix)",
          "note": "both are the \"specialists\" in their respective bipartite systems"
        },
        {
          "field_a_term": "pollinator species (columns)",
          "field_b_term": "exported products (columns)"
        },
        {
          "field_a_term": "NODF nestedness score",
          "field_b_term": "Economic Fitness Index (Tacchella et al. 2012)",
          "note": "both measure degree to which specialist sets are subsets of generalist sets"
        },
        {
          "field_a_term": "extinction cascade robustness",
          "field_b_term": "economic resilience to supply chain disruption",
          "note": "nested structure increases robustness in both domains via the same mechanism"
        },
        {
          "field_a_term": "mutualistic interaction strength",
          "field_b_term": "export volume / revealed comparative advantage"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.0307334100",
          "note": "Bascompte et al. (2003) PNAS 100:9383 — nested structure of mutualistic networks"
        },
        {
          "doi": "10.1038/nature07931",
          "note": "Bastolla et al. (2009) Nature 458:1018 — nestedness reduces interspecific competition"
        },
        {
          "note": "Almeida-Neto et al. (2008) Oikos 117:1227 — NODF nestedness metric"
        },
        {
          "doi": "10.1073/pnas.0900943106",
          "note": "Hidalgo & Hausmann (2009) PNAS 106:10570 — building blocks of economic complexity"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-network-science/b-mutualistic-networks-nestedness.yaml"
    },
    {
      "id": "b-openalex-percolation-habitat-connectivity",
      "title": "Habitat connectivity in fragmented landscapes undergoes a percolation transition where a critical fragmentation threshold determines whether species can disperse across the entire landscape or are confined to isolated patches — the same universality class as bond percolation on a two-dimensional lattice.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Landscape ecology studies how habitat fragmentation affects species persistence and dispersal. Statistical physics provides the exact framework: a binary habitat map (habitat / non-habitat pixels) is formally a site-percolation problem on a 2D lattice. When the fraction of habitat h exceeds the perc",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-habitat-percolation-species-persistence"
      ],
      "communication_gap": "Landscape ecologists and conservation biologists rarely engage with the statistical physics percolation literature. Physicists working on percolation are unaware of the empirical literature on habitat fragmentation. The quantitative equivalence between site percolation and habitat connectivity was noted in theoretical ecology but has not been systematically exploited for field predictions or policy.\n",
      "translation_table": [
        {
          "field_a_term": "site occupation probability p (habitat fraction)",
          "field_b_term": "proportion of landscape covered by suitable habitat",
          "note": "The control parameter; h_c ≈ 0.593 for 2D square lattice site percolation"
        },
        {
          "field_a_term": "percolation threshold p_c",
          "field_b_term": "critical habitat fragmentation threshold for landscape connectivity",
          "note": "Below this threshold, no continuous dispersal corridor crosses the landscape"
        },
        {
          "field_a_term": "giant connected component",
          "field_b_term": "landscape-spanning habitat corridor",
          "note": "The single connected patch that crosses the entire landscape above threshold"
        },
        {
          "field_a_term": "correlation length xi ~ |p - p_c|^{-nu}",
          "field_b_term": "mean dispersal distance achievable near the fragmentation threshold",
          "note": "Diverges at criticality — near the threshold, local decisions have landscape-scale consequences"
        },
        {
          "field_a_term": "power-law patch size distribution",
          "field_b_term": "species-area relationship with universal exponents near fragmentation threshold",
          "note": "Predicts how many species survive as a function of remaining habitat fraction"
        }
      ],
      "references": [
        {
          "doi": "10.1002/sim.1186",
          "note": "Higgins et al. (2002) Quantifying heterogeneity in meta-analysis — 36,378 citations; statistical tools for ecological variance primary reference"
        },
        {
          "doi": "10.1890/0012-9658(2000)081[0736:PCLLC]2.0.CO;2",
          "note": "With & Crist (1995) — critical thresholds in landscape connectivity and percolation"
        },
        {
          "doi": "10.1103/PhysRevE.58.R5257",
          "note": "Stauffer & Aharony — introduction to percolation theory universality classes"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-network-science/b-openalex-percolation-habitat-connectivity.yaml"
    },
    {
      "id": "b-soil-food-webs-network-trophic-theory",
      "title": "Soil food webs — multi-trophic networks of bacteria, fungi, nematodes, mites, and larger invertebrates — obey the same network-theoretic trophic level, connectance, and stability rules as above-ground food webs, but the prevalence of omnivory and detrital energy channels creates a distinct structural signature predictable by network flow analysis",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Soil food web structure can be quantified using the same adjacency-matrix formalism as aquatic and terrestrial webs: Lotka-Volterra community matrices, Lindeman trophic efficiency, and May's connectance-complexity stability criterion all apply, but soil webs have higher connectance (C ~ 0.15 vs. 0.0",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-soil-food-web-connectance-stability"
      ],
      "communication_gap": "Soil ecologists catalogue species and feeding interactions in labor-intensive mesofauna surveys while network scientists apply graph metrics to published matrices; a shared database of standardized soil food webs (comparable to FoodWeb 1.0) is lacking, impeding cross-system meta-analysis.",
      "translation_table": [
        {
          "field_a_term": "bacterial-energy channel vs. fungal-energy channel in soil (ecology)",
          "field_b_term": "parallel flow paths in a flow network (network science)",
          "note": "Bacterial and fungal energy channels operate as alternative network pathways; their relative dominance sets nutrient cycling rates"
        },
        {
          "field_a_term": "nematode feeding guild (bacterivore, fungivore, predator) (ecology)",
          "field_b_term": "node trophic level and in/out-degree in directed graph (network science)",
          "note": "Feeding guild maps to trophic position; guild diversity determines topological diversity of the web"
        },
        {
          "field_a_term": "soil food web connectance C (ecology)",
          "field_b_term": "edge density of a directed graph (network science)",
          "note": "C = L / S^2 (links/species^2); above ~0.1 May's criterion predicts destabilization unless interaction strengths are weak"
        },
        {
          "field_a_term": "detrital pool and decomposer loop (ecology)",
          "field_b_term": "feedback cycle in network flow / recycling node (network science)",
          "note": "Detrital carbon re-enters the network via decomposers, creating long feedback loops that stabilize energy flow"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.256.5065.1820",
          "note": "De Ruiter et al. (1993) - calculating nitrogen mineralization from soil food web models"
        },
        {
          "doi": "10.1038/35004572",
          "note": "Hunt et al. (1987) - the detrital food web of a shortgrass prairie (pioneering quantitative soil web)"
        },
        {
          "doi": "10.1111/j.1461-0248.2006.00928.x",
          "note": "Rooney et al. (2006) - structural asymmetry and the stability of diverse food webs"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-network-science/b-soil-food-webs-network-trophic-theory.yaml"
    },
    {
      "id": "b-trophic-cascades-network-motifs",
      "title": "Trophic cascades in food webs are structurally predicted by the prevalence of tri-trophic chain and apparent competition network motifs: ecosystems with high frequencies of cascade-amplifying motifs exhibit stronger top-down regulation of primary production\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Network motif analysis reveals that trophic cascade strength is not merely a function of predator biomass but of the topological prevalence of specific three- and four-node interaction patterns (tri-trophic chains, omnivory triangles, apparent competition modules) that either amplify or dampen top-d",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-food-web-motif-frequency-predicts-cascade-strength"
      ],
      "communication_gap": "Ecologists study trophic cascades through field experiments and species removal studies while network scientists analyse motif distributions in abstract graphs; few studies explicitly quantify motif frequencies in real food webs and link them to empirically measured cascade strength.\n",
      "translation_table": [
        {
          "field_a_term": "trophic level (ecology)",
          "field_b_term": "topological distance from a basal resource node in a directed interaction graph (network science)",
          "note": "Trophic level maps to graph-theoretic depth in the directed food web network"
        },
        {
          "field_a_term": "indirect top-down cascade (ecology)",
          "field_b_term": "multi-hop path effect in a signed directed network (network science)",
          "note": "Predator → prey → basal resource indirect suppression is a length-2 path in the network"
        },
        {
          "field_a_term": "trophic cascade strength (ecology)",
          "field_b_term": "summed product of signed edge weights along tri-trophic chain motifs (network science)",
          "note": "Cascade magnitude is proportional to the product of interaction strengths along dominant motif paths"
        },
        {
          "field_a_term": "omnivory / intraguild predation (ecology)",
          "field_b_term": "triangle motif with a feed-forward and feedback edge (network science)",
          "note": "Omnivory creates network cycles that typically dampen cascade strength"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.298.5594.824",
          "note": "Milo et al. (2002) - network motifs: simple building blocks of complex networks"
        },
        {
          "doi": "10.1126/science.1059820",
          "note": "Pace et al. (1999) - trophic cascades revealed in diverse ecosystems"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-network-science/b-trophic-cascades-network-motifs.yaml"
    },
    {
      "id": "b-advection-diffusion-x-odor-plume-search",
      "title": "Odor cues in air and water combine advection by mean flow with turbulent diffusion — producing intermittent, filamentous concentration fields — governing search strategies of insects and crustaceans through statistics of encounter rates analogous to chemical engineer models of plume dispersion coefficients and Damköhler-type comparisons of advection to diffusion time scales.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Concentration fields obey advection–diffusion–reaction PDEs; turbulent closures elevate effective diffusivity while preserving filamentary structure at intermediate Schmidt numbers. Odor-tracking animals sample these random concentration traces with antennae or chemosensory hairs; turning rules (Web",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-advection-diffusion-x-odor-plume-search"
      ],
      "communication_gap": "Fluid mechanics textbooks derive Gaussian plume solutions while ecology field studies report behavioral success rates without joint estimation of turbulent intensities at insect flight height.\n",
      "translation_table": [
        {
          "field_a_term": "Plume centerline meander (fluid dispersion)",
          "field_b_term": "Temporal odor intermittency encountered along animal trajectory",
          "note": "Same stochastic process sampled differently along Eulerian vs Lagrangian paths."
        },
        {
          "field_a_term": "Schmidt number Sc = ν/D (fluid mechanics)",
          "field_b_term": "Fine-scale odor filament thickness vs sensory spatial sampling footprint",
          "note": "High Sc yields sharp filaments requiring narrow sensory filters or temporal integration."
        },
        {
          "field_a_term": "Mean advection velocity U",
          "field_b_term": "Upwind bias speed needed for successful source ascent algorithms",
          "note": "Behavioral gain scales with predicted drift vs noise ratio analogous to SNR."
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.0605277103",
          "note": "Murlis et al. tradition — odor-mediated flight trajectories with turbulent structure context"
        },
        {
          "doi": "10.1017/S0022112071000518",
          "note": "Csanady (1973) JFM cluster — atmospheric diffusion fundamentals cited in plume dispersion literature"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-physics/b-advection-diffusion-x-odor-plume-search.yaml"
    },
    {
      "id": "b-ecosystem-resilience-bifurcation",
      "title": "Ecosystem regime shifts (lake eutrophication, savanna-forest, coral bleaching) are fold bifurcations (saddle-node) in nonlinear dynamical systems where hysteresis creates alternative stable states, and critical slowing down near the fold produces measurable early warning signals — rising autocorrelation and variance — validated empirically for 85 lake and fisheries transitions.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Many ecosystems are bistable: they have two alternative stable states (clear/turbid lake, forest/savanna, coral/algae reef) separated by an unstable equilibrium. The dynamics are captured by dx/dt = f(x,p) where x is ecosystem state and p is a control parameter (nutrient loading, grazing pressure, t",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-critical-slowing-down-universal-ews-ecosystem-tipping-fold-bifurcation"
      ],
      "communication_gap": "Ecology's resilience concept (Holling 1973) was formulated qualitatively decades before bifurcation theory entered ecological modelling. Applied ecologists managing fisheries and lakes rarely read nonlinear dynamics journals; theoretical physicists working on bifurcation theory do not read ecology journals. The Scheffer group (Wageningen) is one of the few that actively bridges both communities. Policy-makers managing ecosystems are generally unaware of EWS methods; even where EWS are computed they are rarely used for proactive management decisions.\n",
      "translation_table": [
        {
          "field_a_term": "fold (saddle-node) bifurcation at parameter p_c",
          "field_b_term": "tipping point — threshold of no return in ecosystem management",
          "note": "Fold bifurcations are generic (codimension-1) — mathematically inevitable in bistable systems"
        },
        {
          "field_a_term": "hysteresis loop (different p needed for transition vs. recovery)",
          "field_b_term": "irreversibility of eutrophication / deforestation",
          "note": "Hysteresis width = management cost of restoration relative to prevention"
        },
        {
          "field_a_term": "dominant eigenvalue λ₁ → 0 near bifurcation",
          "field_b_term": "critical slowing down — ecosystem recovery time diverges",
          "note": "Measured as τ_recovery = -1/λ₁; detectable 10+ years before the transition"
        },
        {
          "field_a_term": "variance σ² ∝ (p_c - p)^(-γ) diverges near fold",
          "field_b_term": "increasing variance in lake turbidity / chlorophyll before eutrophication",
          "note": "Fluctuation-dissipation theorem connects variance and relaxation time"
        },
        {
          "field_a_term": "alternative stable states in potential V(x) = -∫f(x)dx",
          "field_b_term": "Holling's resilience — depth of basin of attraction around stable state",
          "note": "Basin depth = ΔV; Holling's concept is exactly the potential well in the normal form"
        }
      ],
      "references": [
        {
          "doi": "10.1038/269471a0",
          "note": "May (1977) Thresholds and breakpoints in ecosystems with a multiplicity of stable states. Nature 269:471"
        },
        {
          "doi": "10.1038/nature08227",
          "note": "Scheffer et al. (2009) Early-warning signals for critical transitions. Nature 461:53"
        },
        {
          "doi": "10.1371/journal.pone.0041010",
          "note": "Dakos et al. (2012) Methods for detecting early warnings of critical transitions in time series illustrated using simulated ecological data. PLoS ONE 7:e41010"
        },
        {
          "note": "Strogatz (1994) Nonlinear Dynamics and Chaos. Addison-Wesley"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-physics/b-ecosystem-resilience-bifurcation.yaml"
    },
    {
      "id": "b-forest-canopy-beer-lambert-radiative",
      "title": "Light extinction through a forest canopy follows a modified Beer-Lambert law: PAR irradiance decreases exponentially with cumulative leaf area index I(L) = I_0 exp(-k·L), where the extinction coefficient k depends on leaf angle distribution and solar zenith angle, connecting plant canopy ecology to radiative transfer theory\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Photosynthetically active radiation (PAR) through a plant canopy is attenuated according to I(z) = I_0 exp(-k · LAI(z)), directly analogous to Beer-Lambert attenuation of light in an absorbing medium with extinction coefficient k (= G(θ)/cos(θ) where G is the projection function of leaf angles and θ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-clumping-index-primary-productivity-underestimate"
      ],
      "communication_gap": "Forest ecologists measure LAI and light interception through hemispherical photography and PAR sensors while radiative transfer physicists model photon transport in turbid media; the clumping correction to Beer-Lambert for non-random leaf distributions and the anisotropic scattering of photons by leaves are not routinely incorporated in ecological light models.\n",
      "translation_table": [
        {
          "field_a_term": "leaf area index LAI = ∫leaf area dz per unit ground area (ecology)",
          "field_b_term": "optical depth τ = k·L of an absorbing layer (optics)",
          "note": "LAI plays the role of optical depth; each unit of LAI attenuates PAR by factor exp(-k) where k ≈ 0.5 for spherical leaf distribution"
        },
        {
          "field_a_term": "canopy light extinction coefficient k (ecology)",
          "field_b_term": "absorption cross-section per unit leaf area in the Beer-Lambert law (optics)",
          "note": "k = G(θ_sun)/cos(θ_sun) where G is the mean projection of leaf area onto a horizontal plane"
        },
        {
          "field_a_term": "canopy gap fraction P(θ) (ecology)",
          "field_b_term": "transmittance of the canopy layer for a given solar angle (optics)",
          "note": "P(θ) = exp(-G(θ)·LAI/cos(θ)) is the probability of a photon at solar zenith angle θ penetrating the canopy without interception"
        },
        {
          "field_a_term": "leaf clumping index Ω (ecology)",
          "field_b_term": "departure from random spatial distribution in Beer-Lambert effective LAI (optics)",
          "note": "Clumped leaves (Ω < 1) transmit more light than random distribution; effective LAI = Ω·LAI in modified Beer-Lambert"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0168-1923(87)90061-4",
          "note": "Norman & Jarvis (1975) - photosynthesis in Sitka spruce — canopy structure and light interception"
        },
        {
          "doi": "10.1016/j.agrformet.2008.12.003",
          "note": "Chen et al. (1997) - leaf area index of boreal forests: theory, techniques, and measurements"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-physics/b-forest-canopy-beer-lambert-radiative.yaml"
    },
    {
      "id": "b-forest-fire-self-organized-criticality",
      "title": "Forest fire frequency-area distributions follow a power law P(A) ~ A^{−β} with β ≈ 1.3–1.5, consistent with Bak-Tang-Wiesenfeld self-organized criticality (SOC): forests spontaneously evolve to a critical state where perturbations (lightning) cause cascading fires of all sizes without external parameter tuning.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Bak, Tang & Wiesenfeld (1987) introduced the sandpile automaton as the prototype SOC system: local collapse rules cause avalanches of all sizes, P(s) ~ s^{-3/2}, without tuning any parameter. The forest fire model (Drossel & Schwabl 1992) is an SOC system where trees grow slowly, lightning strikes r",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-forest-fire-soc-beta-exponent-climate-invariance"
      ],
      "communication_gap": "Forest ecologists and fire managers focus on species composition, fire weather, and suppression tactics; statistical physicists studying SOC rarely engage with practical fire management. Malamud et al. (1998) established the empirical connection but the SOC framework is not routinely used in wildfire risk assessment by land management agencies.\n",
      "translation_table": [
        {
          "field_a_term": "Forest fire area distribution P(A) ~ A^{−1.3}",
          "field_b_term": "SOC avalanche size distribution P(s) ~ s^{-β}",
          "note": "Both power laws lack a characteristic scale; both arise from a critical state"
        },
        {
          "field_a_term": "Tree growth (slow process) + lightning strike (rare driving)",
          "field_b_term": "Slow sandpile grain addition + toppling cascade",
          "note": "Separation of time scales (slow drive, fast cascade) is the hallmark of SOC"
        },
        {
          "field_a_term": "Suppression of small fires → increased large fire risk",
          "field_b_term": "Subcritical sand accumulation → larger avalanches when released",
          "note": "Fire suppression policy moves the forest away from SOC; analogous to subcritical accumulation"
        },
        {
          "field_a_term": "Conifer fuel load (biomass per unit area)",
          "field_b_term": "Local slope / stress in the sandpile; drives the cascade",
          "note": "High fuel load = steep local slope; fire spreads when fuel load exceeds ignition threshold"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.59.381",
          "note": "Bak, Tang & Wiesenfeld (1987) PRL – self-organized criticality: an explanation of 1/f noise"
        },
        {
          "doi": "10.1126/science.281.5384.1840",
          "note": "Malamud et al. (1998) Science – forest fires: an example of self-organized critical behavior"
        },
        {
          "doi": "10.1103/PhysRevE.46.1829",
          "note": "Drossel & Schwabl (1992) PRE – self-organised critical forest-fire model"
        },
        {
          "doi": "10.1073/pnas.0911553106",
          "note": "Pueyo et al. (2010) PNAS – testing self-organized criticality in global forest fire data"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-physics/b-forest-fire-self-organized-criticality.yaml"
    },
    {
      "id": "b-island-biogeography-percolation",
      "title": "Island biogeography's species-area relationship reflects percolation of colonization across habitat — habitat fragmentation is a percolation phase transition",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "MacArthur and Wilson's species-area relationship S = cA^z (z ≈ 0.25) reflects the percolation structure of colonization across fragmented habitat. Below a critical habitat area A_c, connectivity drops below the percolation threshold and species cannot maintain viable metapopulations — the extinction",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-habitat-percolation-z-exponent"
      ],
      "communication_gap": "Conservation biology and ecological theory rarely engage with statistical physics literature; percolation theory developed without awareness of the species-area problem. With & Crist (1995) established the connection but it remains underutilized in applied conservation planning.\n",
      "translation_table": [
        {
          "field_a_term": "island area A (or habitat patch size)",
          "field_b_term": "system size in percolation (number of bonds)",
          "note": "Species richness scales with area as S = cA^z; percolation cluster size scales with p"
        },
        {
          "field_a_term": "species richness S",
          "field_b_term": "size of the largest connected percolation cluster",
          "note": "Both show power-law scaling near critical threshold"
        },
        {
          "field_a_term": "critical habitat area A_c (minimum viable metapopulation)",
          "field_b_term": "percolation threshold p_c",
          "note": "Below A_c / p_c, no spanning cluster exists — collapse of persistence"
        },
        {
          "field_a_term": "colonization/extinction balance",
          "field_b_term": "bond occupation probability in percolation",
          "note": "Immigration balances extinction at equilibrium species richness"
        },
        {
          "field_a_term": "species-area exponent z ≈ 0.25",
          "field_b_term": "critical exponent of percolation cluster (beta/nu ≈ 0.1)",
          "note": "Both reflect fractal geometry of the spatial structure"
        }
      ],
      "references": [
        {
          "note": "MacArthur, R.H. & Wilson, E.O. (1967). The Theory of Island Biogeography. Princeton University Press."
        },
        {
          "doi": "10.2307/1934198",
          "note": "Preston (1962). The canonical distribution of commonness and rarity. Ecology 43:185."
        },
        {
          "doi": "10.2307/1938559",
          "note": "With & Crist (1995). Critical thresholds in species responses to landscape structure. Ecology 76:2446."
        },
        {
          "note": "Hanski, I. (1999). Metapopulation Ecology. Oxford University Press."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-physics/b-island-biogeography-percolation.yaml"
    },
    {
      "id": "b-metabolic-scaling-fractal-networks",
      "title": "Fractal vascular network geometry ↔ ¾-power metabolic scaling law — West-Brown-Enquist theory",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "West, Brown & Enquist (1997) derived Kleiber's empirical ¾-power metabolic scaling law B ∝ M^(3/4) from first principles using the fractal geometry of biological distribution networks (vascular, bronchial, plant xylem). The derivation assumes three constraints: (1) the network is space-filling (frac",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-fractal-network-universal-life-history-clock"
      ],
      "communication_gap": "The West-Brown-Enquist paper appeared in Science but was largely developed by physicists (West) in collaboration with ecologists (Brown, Enquist). Mainstream ecology journals were initially skeptical — many ecologists distrusted the level of mathematical abstraction. Subsequent debates about deviations from ¾ scaling (some taxa show 2/3 exponents) have been productive but are conducted largely in separate literatures (ecology vs biophysics) with limited cross-citation.\n",
      "translation_table": [
        {
          "field_a_term": "space-filling fractal network (D_f = 3)",
          "field_b_term": "body mass M (mass scales as L³)",
          "note": "The fractal must fill three-dimensional space to supply every cell; this fixes the exponent"
        },
        {
          "field_a_term": "area-preserving branching rule (∑ rᵢ² = r_{parent}²)",
          "field_b_term": "Murray's law minimising transport resistance",
          "note": "Optimal branching geometry from fluid dynamics; confirmed in vascular anatomy"
        },
        {
          "field_a_term": "invariant terminal unit (capillary/alveolus size)",
          "field_b_term": "cell size near-invariance across species",
          "note": "The boundary condition: all organisms service the same capillary-to-cell geometry"
        },
        {
          "field_a_term": "fractal branching exponent a = 1/3 (radius ratio)",
          "field_b_term": "metabolic scaling exponent β = 3/4",
          "note": "β = D_f/(D_f+1) = 3/4 for D_f = 3 — pure geometry determines the exponent"
        },
        {
          "field_a_term": "number of branching generations N ~ ln(M)",
          "field_b_term": "organismal lifespan scaling ∝ M^{1/4}",
          "note": "The same network depth parameter predicts multiple life-history traits"
        },
        {
          "field_a_term": "pulsatile vs steady flow transition",
          "field_b_term": "heart rate ∝ M^{-1/4}",
          "note": "Womersley number analysis of the branching network predicts the heart rate exponent"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.276.5309.122",
          "note": "West, Brown & Enquist (1997) Science 276:122 — derivation of ¾-power law"
        },
        {
          "note": "Kleiber (1932) Hilgardia 6:315 — the original empirical ¾ scaling observation",
          "url": "https://hilgardia.ucanr.edu/fileaccess.cfm?article=152208&p=FBHOJH"
        },
        {
          "doi": "10.1126/science.284.5420.1677",
          "note": "West et al. (1999) Science 284:1677 — life history and growth from network model"
        },
        {
          "doi": "10.1111/j.0269-8463.2004.00856.x",
          "note": "Savage et al. (2004) Funct Ecol 18:257 — tests and critiques of the WBE model"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-physics/b-metabolic-scaling-fractal-networks.yaml"
    },
    {
      "id": "b-neutral-theory-random-walks",
      "title": "Hubbell's neutral theory of biodiversity is mathematically equivalent to Kimura's neutral theory of molecular evolution and the voter model in statistical physics: all three describe random drift on a simplex, producing species abundance distributions as zero-sum multinomials (random walks on composition space).\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Hubbell (2001) unified neutral theory: all J individuals in a community are demographically equivalent regardless of species identity. Birth, death, speciation (rate ν), and immigration (rate m) drive stochastic dynamics. The zero-sum multinomial (ZSM) distribution of species abundances emerges from",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-nestedness-robustness-degree-heterogeneity-mediation"
      ],
      "communication_gap": "Hubbell (2001) was aware of analogies to neutral evolution but did not cite the voter model literature or Kimura's exact mathematical formalism. Volkov et al. (2003) derived the analytical solution using Kolmogorov equations standard in physics but unusual in ecology. The voter model literature in physics (Sood & Redner, Castellano et al. 2005) developed independently with no citation of Hubbell despite solving the same equations.\n",
      "translation_table": [
        {
          "field_a_term": "community size J (individuals in local community)",
          "field_b_term": "population size N (individuals in genetic drift theory)",
          "note": "both determine the strength of drift vs drift-immigration balance"
        },
        {
          "field_a_term": "speciation rate ν (probability an offspring is a new species)",
          "field_b_term": "mutation rate μ per generation in molecular neutral theory",
          "note": "fundamental biodiversity number θ = 2Jν ↔ θ = 4Nμ in population genetics"
        },
        {
          "field_a_term": "immigration rate m (individuals from metacommunity)",
          "field_b_term": "gene flow rate between subpopulations",
          "note": "both create immigration-drift balance determining local diversity"
        },
        {
          "field_a_term": "zero-sum multinomial species abundance distribution",
          "field_b_term": "Ewens sampling formula in population genetics",
          "note": "identical mathematical object; derived independently in both fields"
        },
        {
          "field_a_term": "metacommunity (regional species pool)",
          "field_b_term": "infinite-population source of ancestral alleles",
          "note": "both play the role of a boundary condition determining long-run equilibrium"
        }
      ],
      "references": [
        {
          "note": "Hubbell (2001) — The Unified Neutral Theory of Biodiversity and Biogeography; Princeton UP"
        },
        {
          "doi": "10.1038/nature02013",
          "note": "Volkov et al. (2003) — Neutral theory and relative species abundance in ecology; Nature 424:1035"
        },
        {
          "doi": "10.1038/217624a0",
          "note": "Kimura (1968) — Evolutionary rate at the molecular level; Nature 217:624"
        },
        {
          "doi": "10.1111/j.1461-0248.2004.00607.x",
          "note": "Etienne & Olff (2004) — How dispersal limitation shapes species-body size distributions in local communities; Ecol Lett 7:170"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-physics/b-neutral-theory-random-walks.yaml"
    },
    {
      "id": "b-nutrient-cycling-stoichiometry",
      "title": "Ecological stoichiometry bridges ecology and chemistry: the Redfield ratio (C:N:P = 106:16:1) reveals that ocean chemistry and phytoplankton biochemistry have co-evolved toward elemental homeostasis, and Liebig's law of the minimum connects nutrient limitation to growth rates via the physics of diffusion-limited resource acquisition.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Ecological stoichiometry (Sterner & Elser 2002) is the study of the balance of chemical elements in ecological interactions. It unifies ecological dynamics with the conservation of matter: organisms require multiple elements in specific ratios, and imbalances between supply and demand drive competit",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-growth-rate-hypothesis-ribosome-phosphorus-universality"
      ],
      "communication_gap": "Stoichiometry is a concept from chemistry; ecology traditionally studied species interactions, population dynamics, and energy flow without systematic accounting of elemental mass balances. The Redfield ratio (1958) was a marine chemistry observation; its ecological implications were developed by Elser and Sterner (1992-2002) three decades later. Limnologists studying freshwater lakes and marine oceanographers studying ocean biogeochemistry developed separate bodies of stoichiometric theory that were only synthesized by Sterner & Elser (2002). Ecological stoichiometry remains underrepresented in ecology textbooks relative to its explanatory power for nutrient cycling, eutrophication, and community ecology.\n",
      "translation_table": [
        {
          "field_a_term": "Liebig's law of the minimum (limiting nutrient)",
          "field_b_term": "bottleneck in a multi-step production process (operations research)",
          "note": "both identify the scarcest resource as the rate-limiting factor in a multi-requirement system"
        },
        {
          "field_a_term": "Redfield ratio C:N:P = 106:16:1 (marine phytoplankton)",
          "field_b_term": "chemical formula / molar ratio of a compound (chemistry)",
          "note": "Redfield ratio is essentially the average molecular formula of marine phytoplankton biomass"
        },
        {
          "field_a_term": "stoichiometric homeostasis (animal maintains fixed C:N:P)",
          "field_b_term": "regulation / setpoint control (control theory, physiological homeostasis)",
          "note": "homeostatic organisms regulate body composition like a thermostat — feedback prevents drift"
        },
        {
          "field_a_term": "growth rate hypothesis (high growth → high P:C ratio via rRNA)",
          "field_b_term": "metabolic scaling theory (growth rate scales with cellular machinery allocation)",
          "note": "connects molecular biochemistry (ribosome stoichiometry) to ecosystem element cycling"
        },
        {
          "field_a_term": "eutrophication (excess N and P → algal blooms)",
          "field_b_term": "stoichiometric imbalance (N:P < 10 favoring N-fixers over N-limited species)",
          "note": "N:P ratio determines community composition via competitive advantage — a stoichiometric selector"
        }
      ],
      "references": [
        {
          "note": "Sterner & Elser (2002) Ecological Stoichiometry: The Biology of Elements from Molecules to the Biosphere. Princeton University Press"
        },
        {
          "doi": "10.1511/2058.343.205",
          "note": "Redfield (1958) The biological control of chemical factors in the environment; Am Sci 46:205"
        },
        {
          "note": "Liebig (1840) Organic Chemistry in its Applications to Agriculture and Physiology. Taylor and Walton, London"
        },
        {
          "doi": "10.2307/3545680",
          "note": "Elser et al. (1996) Organism size, life history, and N:P stoichiometry; Oikos 77:611"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-physics/b-nutrient-cycling-stoichiometry.yaml"
    },
    {
      "id": "b-oceanic-turbulence-mixing",
      "title": "Kolmogorov turbulence theory and Munk-Wunsch mixing budgets bridge fluid physics to oceanic ecology — diapycnal diffusivity sets the nutrient supply and climate memory of the deep ocean",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Ocean mixing is the bridge between turbulence physics and marine ecology/climate. The diapycnal diffusivity κ = Γε/N² (Osborn 1980) links the turbulent kinetic energy dissipation rate ε (measurable by microstructure profilers) to vertical mixing across density surfaces — the pathway by which deep nu",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-tidal-internal-wave-mixing-abyssal-hotspots"
      ],
      "communication_gap": "Physical oceanographers and marine ecologists attend different conferences (Ocean Sciences vs. ASLO Aquatic Sciences) and rarely co-author. Turbulence physicists studying Kolmogorov scaling rarely engage with biogeochemical implications; marine ecologists measuring nutrient fluxes rarely use microstructure turbulence measurements. Climate modelers parameterize mixing empirically without input from either group.\n",
      "translation_table": [
        {
          "field_a_term": "turbulent kinetic energy dissipation ε (physics)",
          "field_b_term": "vertical nutrient flux (marine ecology)",
          "note": "ε measured by microstructure probes predicts diapycnal nitrate supply to phytoplankton"
        },
        {
          "field_a_term": "buoyancy frequency N (stratification, physics)",
          "field_b_term": "pycnocline / thermocline strength (oceanography)",
          "note": "High N means strong stratification — low mixing — limiting nutrient supply to surface"
        },
        {
          "field_a_term": "Kolmogorov microscale η (turbulence physics)",
          "field_b_term": "bacterial cell size / turbulence avoidance (microbial ecology)",
          "note": "Microbes smaller than η experience smooth (non-turbulent) flow — critical for chemotaxis and motility"
        },
        {
          "field_a_term": "Langmuir circulation cells (wind-wave-current physics)",
          "field_b_term": "plankton patchiness (biological oceanography)",
          "note": "Langmuir cells concentrate buoyant plankton in windrows — determines microscale ecological patchiness"
        },
        {
          "field_a_term": "thermohaline circulation (physical oceanography)",
          "field_b_term": "global nutrient cycling / carbon sequestration (biogeochemistry)",
          "note": "The overturning circulation transports heat, carbon, and nutrients on millennial timescales"
        }
      ],
      "references": [
        {
          "doi": "10.1016/S0967-0637(98)00070-8",
          "note": "Munk & Wunsch (1998). Abyssal recipes II: Energetics of tidal and wind mixing. Deep Sea Res 45:1977."
        },
        {
          "doi": "10.1175/1520-0485(1980)010<0083:OTLETO>2.0.CO;2",
          "note": "Osborn (1980). Estimates of the local rate of vertical diffusion from dissipation measurements. J Phys Oceanogr 10:83."
        },
        {
          "doi": "10.1017/S0022112096008439",
          "note": "McWilliams, Sullivan & Moeng (1997). Langmuir turbulence in the ocean. J Fluid Mech 334:1."
        },
        {
          "doi": "10.1146/annurev.fluid.36.050802.122121",
          "note": "Wunsch & Ferrari (2004). Vertical mixing, energy, and the general circulation of the oceans. Annu Rev Fluid Mech 36:281."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-physics/b-oceanic-turbulence-mixing.yaml"
    },
    {
      "id": "b-seed-dispersal-levy-flight",
      "title": "Seed dispersal kernels follow truncated Lévy distributions: the power-law tail of rare long-distance dispersal events is mathematically equivalent to Lévy flight foraging",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Seed dispersal kernels p(r) — the probability that a seed lands at distance r from the parent — often follow fat-tailed distributions with p(r)~r^(−α) for large r (1<α<3), rather than thin-tailed Gaussians assumed by classical diffusion. This power-law tail is mathematically equivalent to the step-l",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-seed-dispersal-levy-flight"
      ],
      "communication_gap": "Ecologists who measure dispersal kernels typically fit log-normal or 2Dt distributions without connecting to stable distribution theory or Lévy flight literature from statistical physics. The mathematical equivalence means existing Lévy flight results (anomalous diffusion scaling, optimal search theory) directly apply to ecology but are rarely cited in plant dispersal papers.\n",
      "translation_table": [
        {
          "field_a_term": "seed dispersal kernel p(r) (ecology)",
          "field_b_term": "step-length distribution f(ℓ) in a Lévy flight (statistical physics)",
          "note": "Both characterized by tail exponent α; LDD events = long Lévy steps"
        },
        {
          "field_a_term": "long-distance dispersal (LDD) event fraction",
          "field_b_term": "probability of a superdiffusive step exceeding threshold ℓ₀",
          "note": "LDD fraction determines the velocity of range expansion and genetic mixing"
        },
        {
          "field_a_term": "plant colonization front speed",
          "field_b_term": "anomalous diffusion front speed v_front ∝ D_Lévy^(1/α)",
          "note": "Lévy-driven fronts are faster than Gaussian-diffusion fronts by a factor depending on α"
        },
        {
          "field_a_term": "dispersal agent (bird gut retention time, wind turbulence)",
          "field_b_term": "physical mechanism generating the power-law step distribution",
          "note": "Gut retention time distribution and wind speed distribution both generate fat-tailed dispersal"
        }
      ],
      "references": [
        {
          "doi": "10.1086/285551",
          "note": "Clark (1998) Why trees migrate so fast — confronting theory with dispersal biology. Am Nat 152:204"
        },
        {
          "doi": "10.1038/44095",
          "note": "Viswanathan et al. (1999) Optimizing the success of random searches. Nature 401:911"
        },
        {
          "doi": "10.2307/2265698",
          "note": "Kot et al. (1996) Dispersal data and the spread of invading organisms. Ecology 77:2027"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-physics/b-seed-dispersal-levy-flight.yaml"
    },
    {
      "id": "b-trophic-cascades-phase-transitions",
      "title": "Trophic cascades triggered by apex predator removal are fold bifurcations (saddle-node) in ecosystem dynamical systems — the same mathematics as all ecological tipping points",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Trophic cascades — propagation of population changes from apex predators down through herbivore and primary producer trophic levels — represent transitions between multiple stable ecosystem states. The system possesses two stable fixed points (predator-present and predator-absent states) separated b",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-trophic-cascade-early-warning-signals-critical-slowing-down"
      ],
      "communication_gap": "Ecologists describe trophic cascades in species-interaction language while physicists describe bifurcations in dynamical systems language. The mathematical identity was first formalised by Scheffer et al. (2001) but remains underutilised in conservation biology practice, where the nonlinear dynamical systems framing is rarely taught.\n",
      "translation_table": [
        {
          "field_a_term": "apex predator density",
          "field_b_term": "control parameter (bifurcation parameter)",
          "note": "Predator abundance plays the role of the control parameter crossing a tipping point"
        },
        {
          "field_a_term": "ecosystem with/without top predator",
          "field_b_term": "two stable fixed points / alternative stable states",
          "note": "Both correspond to stable equilibria of the dynamical system"
        },
        {
          "field_a_term": "trophic cascade threshold",
          "field_b_term": "saddle-node bifurcation point",
          "note": "The critical predator density below which the system transitions"
        },
        {
          "field_a_term": "behaviour modification by prey (landscape of fear)",
          "field_b_term": "nonlinear feedback loop",
          "note": "Indirect effects arise from nonlinear coupling between trophic levels"
        }
      ],
      "references": [
        {
          "note": "Estes et al. (2011) — trophic downgrading of planet Earth",
          "doi": "10.1126/science.1205106"
        },
        {
          "note": "Ripple & Beschta (2004) — wolves, elk, and trophic cascades in Yellowstone",
          "doi": "10.1641/0006-3568(2004)054[0755:WEATCI]2.0.CO;2"
        },
        {
          "note": "Scheffer et al. (2001) — catastrophic shifts in ecosystems",
          "doi": "10.1038/35098000"
        },
        {
          "note": "Terborgh et al. (2001) — ecological meltdown in predator-free forest fragments",
          "doi": "10.1126/science.1059257"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-physics/b-trophic-cascades-phase-transitions.yaml"
    },
    {
      "id": "b-wildfire-dynamics-reaction-diffusion",
      "title": "Wildfire spread is a reaction-diffusion system: heat release (reaction front) coupled to heat transport (diffusion via radiation and convection), with climate-fire-atmosphere feedbacks producing pyroconvective plumes that drive fire spread exceeding 1 km/min.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Wildfire spread is mathematically a reaction-diffusion system: fuel (vegetation) acts as a reactant; heat acts as the diffusing species; the fire front propagates as a traveling wave with speed determined by the balance of reaction rate (combustion) and diffusion (heat transport by radiation and con",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-climate-fire-feedback-accelerates-beyond-linear-projections"
      ],
      "communication_gap": "Fire ecologists who study post-fire vegetation recovery and fire regimes rarely engage with the atmospheric physics of pyroconvection. Fire behavior analysts who use Rothermel's model operationally often do not engage with the reaction-diffusion theory underlying it. Climate scientists modeling fire-atmosphere feedbacks often treat fire as a simple carbon flux without the fluid mechanics.\n",
      "translation_table": [
        {
          "field_a_term": "fire front propagation",
          "field_b_term": "traveling wave in reaction-diffusion equation",
          "note": "fire spreads as a self-sustaining wave; front speed = c = 2√(D·k)"
        },
        {
          "field_a_term": "fuel load (biomass per area)",
          "field_b_term": "reactant concentration in reaction-diffusion system",
          "note": "vegetation density and moisture set the reaction rate"
        },
        {
          "field_a_term": "heat transport by radiation + convection",
          "field_b_term": "diffusion coefficient D in reaction-diffusion equation",
          "note": "determines how far the heat front preheats fuels ahead of the flame"
        },
        {
          "field_a_term": "pyroconvection (fire-driven convective column)",
          "field_b_term": "Rayleigh-Bénard convection with reactive source term",
          "note": "strong enough to create its own mesoscale circulation; fire makes its own weather"
        },
        {
          "field_a_term": "fire perimeter fractal dimension D ≈ 1.3",
          "field_b_term": "fractal dimension of reaction front in heterogeneous medium",
          "note": "terrain and fuel heterogeneity create fractal fire shapes; D > 1 means non-Euclidean spread"
        }
      ],
      "references": [
        {
          "note": "Rothermel (1972) A mathematical model for predicting fire spread in wildland fuels; USDA Forest Service Res. Paper INT-115"
        },
        {
          "doi": "10.1071/WF03043",
          "note": "Clark et al. (2004) Coupled atmosphere-fire model convective feedback in wildfire simulations; Int J Wildland Fire 13:313"
        },
        {
          "doi": "10.1073/pnas.1607171113",
          "note": "Abatzoglou & Williams (2016) Impact of anthropogenic climate change on wildfire across western US forests; PNAS 113:11770"
        },
        {
          "doi": "10.1071/WF06143",
          "note": "Sullivan (2009) Wildland surface fire spread modelling — a review; Int J Wildland Fire 18:349"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-physics/b-wildfire-dynamics-reaction-diffusion.yaml"
    },
    {
      "id": "b-common-pool-resources-game-theory",
      "title": "Ostrom's empirical study of common pool resource governance overturns Hardin's Tragedy of the Commons, showing that communities self-organise cooperative institutions using the repeated-game mechanism that game theory predicts but Hardin ignored.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Hardin's \"Tragedy of the Commons\" (1968) argued that shared resources are inevitably depleted by rational self-interest — modelled as a one-shot prisoner's dilemma where defection dominates. Ostrom's fieldwork (2009 Nobel) showed this empirically false: communities worldwide self-organise governance",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ostrom-commons-multilateral-failure"
      ],
      "communication_gap": "Hardin's metaphor entered popular culture; Ostrom's empirical refutation reached political science and environmental studies but has not fully penetrated mainstream economics or international relations theory, where the tragedy-of-commons framing still dominates. Game theory and institutional economics are rarely taught together with ecology in graduate programs.\n",
      "translation_table": [
        {
          "field_a_term": "common pool resource (fishery, forest)",
          "field_b_term": "shared payoff pool in repeated game",
          "note": "Both are rivalrous (use by one reduces availability) but non-excludable without governance"
        },
        {
          "field_a_term": "sustainable harvest rule",
          "field_b_term": "cooperative strategy in repeated game",
          "note": "The Nash equilibrium of the repeated game is the sustainable rule Ostrom communities discover"
        },
        {
          "field_a_term": "monitoring of harvests",
          "field_b_term": "observable action in game (enables Tit-for-Tat)",
          "note": "Without monitoring, cooperation collapses; Ostrom's principle 4 directly parallels game theory requirement"
        },
        {
          "field_a_term": "graduated sanctions for violation",
          "field_b_term": "proportional punishment in folk theorem",
          "note": "Folk theorem: any feasible payoff above minimax can be sustained with credible punishment"
        },
        {
          "field_a_term": "communication and collective choice",
          "field_b_term": "cheap talk game enabling coordination",
          "note": "Pre-play communication enables coordination equilibrium selection; absent in Hardin's model"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.162.3859.1243",
          "note": "Hardin, G. (1968). The tragedy of the commons. Science 162:1243."
        },
        {
          "note": "Ostrom, E. (1990). Governing the Commons. Cambridge University Press."
        },
        {
          "note": "Axelrod, R. (1984). The Evolution of Cooperation. Basic Books."
        },
        {
          "doi": "10.1126/science.1091015",
          "note": "Dietz, T., Ostrom, E. & Stern, P.C. (2003). The struggle to govern the commons. Science 302:1907."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-social-science/b-common-pool-resources-game-theory.yaml"
    },
    {
      "id": "b-common-pool-resources-institutional-design",
      "title": "Ostrom's empirical refutation (Nobel 2009) of Hardin's tragedy of the commons shows communities self-organize sustainable governance via eight design principles; game-theoretically, cooperative equilibria are sustained when the discount factor δ > 1-1/N (Folk theorem), connecting ecology, social science, and game theory through the mathematics of repeated-game cooperation.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Hardin (1968): individually rational overexploitation destroys shared resources — the \"tragedy\" occurs because each user's marginal cost is shared while marginal benefit is private. The game is a multi-player Prisoner's Dilemma: defect (overuse) dominates cooperate (restrain) in one-shot play. Ostro",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ostrom-design-principles-digital-commons"
      ],
      "communication_gap": "Ecological economists, political scientists studying institutions, and game theorists work in largely separate communities. Hardin's paper (Science, biology audience) is famous but Ostrom's Nobel-winning empirical refutation (political science journals) is less widely known in natural sciences. The formal game-theoretic Folk theorem basis of Ostrom's findings is rarely cited in her empirical papers, creating a gap between the theoretical explanation and its empirical validation.\n",
      "translation_table": [
        {
          "field_a_term": "common pool resource (CPR) — subtractable, non-excludable",
          "field_b_term": "public good (non-excludable, non-rival) minus subtractability",
          "note": "CPR is the intermediate category between public goods and private goods"
        },
        {
          "field_a_term": "Hardin tragedy — dominant strategy to defect in one-shot game",
          "field_b_term": "Nash equilibrium of one-shot Prisoner's Dilemma",
          "note": "Hardin implicitly assumed no iteration, no communication, no institutional design"
        },
        {
          "field_a_term": "Ostrom monitoring principle (principle 4)",
          "field_b_term": "Folk theorem condition — mutual observation enables credible punishment",
          "note": "without monitoring, trigger strategies lose their incentive force"
        },
        {
          "field_a_term": "graduated sanctions (principle 5)",
          "field_b_term": "grim trigger vs tit-for-tat — partial punishment sustains cooperation",
          "note": "mild first-offense sanction minimizes loss from accidental defection"
        },
        {
          "field_a_term": "nested governance (principle 8)",
          "field_b_term": "hierarchical mechanism design with multiple principals",
          "note": "federal structure allows local adaptation within global coordination constraints"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.162.3859.1243",
          "note": "Hardin (1968) The tragedy of the commons. Science 162:1243–1248"
        },
        {
          "note": "Ostrom (1990) Governing the Commons; Cambridge University Press"
        },
        {
          "note": "Ostrom (2009) Beyond markets and states: polycentric governance of complex economic systems. Nobel Prize Lecture"
        },
        {
          "doi": "10.1126/science.1091015",
          "note": "Dietz et al. (2003) The struggle to govern the commons. Science 302:1907–1912"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-social-science/b-common-pool-resources-institutional-design.yaml"
    },
    {
      "id": "b-conservation-psychology-pro-environmental",
      "title": "Conservation psychology's value-belief-norm theory bridges ecological science and social science, revealing that attitude-behavior gaps in pro-environmental action are better closed by behavioral defaults, social norms, and place attachment than by providing more ecological information.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Conservation psychology studies the psychological factors driving pro-environmental behaviour. The value-belief-norm (VBN) theory (Stern 2000) proposes a causal chain: altruistic values → ecological worldview (NEP scale) → awareness of environmental consequences → felt moral obligation → pro-environ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-place-attachment-mediates-conservation-behavior-more-than-vbn"
      ],
      "communication_gap": "Ecologists and conservation biologists publish in Biological Conservation and Conservation Biology; social psychologists publish in JPSP and Psychological Science; behavioral economists publish in AER and QJE. The VBN framework (Stern 2000) emerged from environmental sociology and is not widely known in conservation biology. Nudge theory (Thaler-Sunstein) is well-known in economics but rarely applied systematically to conservation management decisions. Conservation organizations often run information campaigns whose effectiveness social science has shown to be limited.\n",
      "translation_table": [
        {
          "field_a_term": "species at risk (ecology)",
          "field_b_term": "identifiable victim (social psychology)",
          "note": "Single species with a face (polar bear, panda) receives more conservation support than biodiversity statistics"
        },
        {
          "field_a_term": "critical habitat area threshold (percolation p_c)",
          "field_b_term": "conservation policy urgency framing",
          "note": "When habitat is below percolation threshold, urgency framing matters more — social science bridges to ecology"
        },
        {
          "field_a_term": "ecosystem service valuation",
          "field_b_term": "contingent valuation / willingness-to-pay (economics)",
          "note": "Connecting ecological benefit quantification to behavioral economics payment mechanisms"
        },
        {
          "field_a_term": "carrying capacity / sustainability constraint",
          "field_b_term": "behavioral sufficiency — lifestyle change within planetary boundaries",
          "note": "Physical limits set the constraint; behavioral science designs the achievable pathway"
        },
        {
          "field_a_term": "trophic cascade (removing wolves changes vegetation)",
          "field_b_term": "social cascade — one visible conservation act triggers community norm shift",
          "note": "Both involve indirect effects propagating through a network — ecological and social dynamics are isomorphic"
        }
      ],
      "references": [
        {
          "doi": "10.1111/0022-4537.00175",
          "note": "Stern (2000) — New environmental theories: toward a coherent theory of environmentally significant behavior, J Soc Issues 56:407"
        },
        {
          "note": "Thaler & Sunstein (2008) Nudge: Improving Decisions About Health, Wealth, and Happiness, Yale"
        },
        {
          "doi": "10.1111/j.1467-9280.2007.01917.x",
          "note": "Schultz et al. (2007) — The constructive, destructive, and reconstructive power of social norms, Psychol Sci 18:429"
        },
        {
          "doi": "10.1177/0013916599316004",
          "note": "Kals et al. (1999) — Emotional affinity toward nature as a motivational basis to protect nature, Environ Behav 31:178"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-social-science/b-conservation-psychology-pro-environmental.yaml"
    },
    {
      "id": "b-political-ecology-environmental-justice",
      "title": "Political ecology links power relations and resource access to quantifiable environmental injustice — PM2.5 exposure 1.54× higher for people of color (Tessum et al. 2021) — bridging social science power analysis with ecology, epidemiology, and environmental policy.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Political ecology synthesizes Marxist political economy with ecology to show that environmental burdens and benefits are distributed through social structures of power, race, and class — not randomly or purely by market efficiency. Environmental justice is the empirical and normative research progra",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-pes-elite-capture-indigenous-displacement-monitoring-prevention"
      ],
      "communication_gap": "Political ecology is primarily a qualitative social science (published in Political Geography, Environment and Planning A) with roots in Marxist theory — largely invisible to quantitative ecologists and epidemiologists. Environmental justice research spans law (environmental law journals), public health (Environmental Health Perspectives, Am J Public Health), and social science without a unified quantitative framework. Ecosystem ecologists measuring biodiversity and function rarely engage with the political economy of who governs the ecosystems they study. Economists designing PES schemes rarely engage with the political ecology critique, and vice versa. The Ostrom SES framework bridges institutional economics and ecology but is still rarely applied with the power-analysis lens of political ecology.\n",
      "translation_table": [
        {
          "field_a_term": "environmental burden (pollution exposure, flood risk, heat island)",
          "field_b_term": "ecosystem disservice allocation in ecological accounting"
        },
        {
          "field_a_term": "racial/class gradient in PM2.5 exposure (Tessum et al. 2021)",
          "field_b_term": "non-random spatial distribution of environmental stressors"
        },
        {
          "field_a_term": "common pool resource governance (Ostrom SES framework)",
          "field_b_term": "collective action equilibrium in game theory / institutional economics"
        },
        {
          "field_a_term": "enclosure (private property displacing community governance)",
          "field_b_term": "regime shift from cooperative to competitive equilibrium"
        },
        {
          "field_a_term": "Payments for Ecosystem Services (PES)",
          "field_b_term": "Coasian bargaining for environmental externalities"
        },
        {
          "field_a_term": "elite capture in PES programs",
          "field_b_term": "rent extraction in market failure analysis"
        },
        {
          "field_a_term": "environmental racism (disproportionate burden on communities of color)",
          "field_b_term": "discriminatory exposure externality — market failure with social structure"
        }
      ],
      "references": [
        {
          "note": "Bullard (1990) Dumping in Dixie: Race, Class, and Environmental Quality — Westview Press (foundational environmental justice study)"
        },
        {
          "doi": "10.1126/science.aba5394",
          "note": "Tessum et al. (2021) Science 371:246 — PM2.5 polluters disproportionately and systemically affect people of color in the United States"
        },
        {
          "note": "Walker (2012) Environmental Justice: Concepts, Evidence and Politics — Routledge"
        },
        {
          "doi": "10.1016/j.envsci.2011.10.004",
          "note": "Leach et al. (2012) Environ Sci Policy 16:14 — transforming innovation for sustainability"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-social-science/b-political-ecology-environmental-justice.yaml"
    },
    {
      "id": "b-resilience-theory-adaptive-management",
      "title": "Holling's ecological resilience theory (1973) — ecosystems have multiple stable states with resilience = basin of attraction width, not proximity to equilibrium — provides the panarchy framework applicable to social-ecological systems, cities, and institutions, connecting the fold bifurcation mathematics of alternative stable states to social tipping points and adaptive management.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Holling (1973) distinguished resilience (ability to absorb disturbance without state change) from stability (return time to equilibrium). The \"ball in cup\" metaphor: the basin of attraction width determines resilience; a narrowing basin predicts catastrophic regime shift even as the system appears s",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-collective-action-ostrom-design-principles-v2"
      ],
      "communication_gap": "Holling (1973) published in Annual Review of Ecology and Systematics without citing bifurcation theory or Thom's catastrophe theory (published same year), despite working with the identical mathematics. Gunderson & Holling (2002) Panarchy deliberately avoided mathematical formalism to reach interdisciplinary audiences, at the cost of losing connection to physics and engineering communities who developed the same theory under different names (regime shifts, tipping elements, critical transitions).\n",
      "translation_table": [
        {
          "field_a_term": "ecological resilience (basin of attraction width)",
          "field_b_term": "institutional resilience (capacity to absorb political/economic shocks)",
          "note": "both measured by how large a perturbation is needed to flip the system to an alternate state"
        },
        {
          "field_a_term": "fold bifurcation / catastrophic regime shift",
          "field_b_term": "tipping point in social-ecological system",
          "note": "same mathematical structure; early warning signals (CSD) applicable to both"
        },
        {
          "field_a_term": "alternative stable states (e.g., clear lake vs turbid lake)",
          "field_b_term": "institutional bistability (e.g., trust vs distrust equilibria in governance)",
          "note": "both exhibit hysteresis — transition back requires larger effort than transition forward"
        },
        {
          "field_a_term": "panarchy adaptive cycle (r→K→Ω→α)",
          "field_b_term": "Schumpeterian creative destruction (growth, accumulation, disruption, innovation)",
          "note": "Schumpeter's economic cycle maps onto Holling's ecological cycle; similar 4-phase structure"
        },
        {
          "field_a_term": "slow variables (phosphorus loading, fishing pressure)",
          "field_b_term": "institutional parameters (inequality, social capital, institutional quality)",
          "note": "slow variables determine the position of the fold bifurcation; hard to change but determine stability"
        }
      ],
      "references": [
        {
          "doi": "10.1146/annurev.es.04.110173.000245",
          "note": "Holling (1973) — Resilience and stability of ecological systems; Annu Rev Ecol Syst 4:1"
        },
        {
          "note": "Gunderson & Holling (2002) — Panarchy; Island Press"
        },
        {
          "doi": "10.1146/annurev.ecolsys.35.021103.105711",
          "note": "Folke et al. (2004) — Regime shifts, resilience, and biodiversity in ecosystem management; Annu Rev Ecol Evol Syst 35:557"
        },
        {
          "doi": "10.5751/ES-00650-090205",
          "note": "Walker et al. (2004) — Resilience, adaptability and transformability; Ecol Soc 9:5"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-social-science/b-resilience-theory-adaptive-management.yaml"
    },
    {
      "id": "b-traditional-knowledge-citizen-science",
      "title": "Traditional Ecological Knowledge and Citizen Science — indigenous fire management, FAIR+CARE data sovereignty, and iNaturalist crowd-sourced biodiversity monitoring bridge ancient and digital knowledge systems",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Traditional Ecological Knowledge (TEK) encompasses the cumulative body of knowledge, practices, and beliefs about relationships between living beings (including humans) and their environment, developed by indigenous and local communities over millennia through direct contact with the environment. TE",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Academic ecologists and indigenous knowledge holders have historically operated within fundamentally different epistemological frameworks — Western science demands repeatability and falsifiability, while TEK is embedded in cultural, spiritual, and relational ontologies. Power asymmetries (colonialism, extractive research practices) have created justified indigenous scepticism of academic partnerships. Citizen science platforms have largely been designed by academic ecologists without meaningful co-design with indigenous communities. The CARE principles represent a genuine attempt to address this but implementation in practice remains uneven.\n",
      "translation_table": [
        {
          "field_a_term": "traditional ecological knowledge (TEK)",
          "field_b_term": "millennial-timescale empirical ecological database encoded in culture",
          "note": "TEK is not 'anecdotal' — it represents iterated observational experiments over thousands of generations with selection for accuracy"
        },
        {
          "field_a_term": "fire-stick farming (controlled mosaic burning)",
          "field_b_term": "landscape-scale pyrodiversity management for biodiversity and fire risk",
          "note": "Creates heterogeneous habitat patches at different post-fire succession stages; increases species richness"
        },
        {
          "field_a_term": "citizen science data (iNaturalist, eBird)",
          "field_b_term": "crowdsourced biodiversity occurrence records at continental scale",
          "note": "Requires detection probability modelling (occupancy models) to account for heterogeneous observer effort"
        },
        {
          "field_a_term": "CARE principles (indigenous data governance)",
          "field_b_term": "sovereignty-respecting framework for TEK integration in scientific databases",
          "note": "Complements FAIR: FAIR maximises data sharing; CARE ensures indigenous communities retain control over their knowledge"
        },
        {
          "field_a_term": "biocultural diversity (co-occurrence of linguistic and biodiversity hotspots)",
          "field_b_term": "cultural and ecological knowledge systems shaped by the same environments",
          "note": "Maffi & Woodley: 80% of world's biodiversity hotspots overlap with territories of indigenous peoples"
        },
        {
          "field_a_term": "phenological TEK indicators (traditional ecological calendars)",
          "field_b_term": "biological indicators of seasonal timing validated against formal phenology records",
          "note": "Aboriginal flowering phenology calendars predict pollinator activity timing more accurately than meteorological season boundaries"
        }
      ],
      "references": [
        {
          "note": "Berkes (2008) Sacred Ecology — TEK theory and practice"
        },
        {
          "doi": "10.1126/science.1199480",
          "note": "Bowman et al. (2011) Science 331:1,547 — Aboriginal fire management reduces emissions"
        },
        {
          "doi": "10.1371/journal.pone.0100578",
          "note": "Sullivan et al. (2014) PLoS ONE 9:e100578 — eBird data quality validation"
        },
        {
          "doi": "10.5334/dsj-2020-043",
          "note": "Carroll et al. (2020) Data Sci J 19:43 — CARE principles for indigenous data governance"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/ecology-social-science/b-traditional-knowledge-citizen-science.yaml"
    },
    {
      "id": "b-species-distribution-maxent",
      "title": "MaxEnt species distribution modelling is the ecological application of Jaynes' maximum entropy principle: given presence-only occurrence data and environmental features, MaxEnt finds the distribution of maximum entropy subject to empirical feature constraints — a result formally identical to a Gibbs distribution and to maximum likelihood estimation in a Poisson point process model.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Jaynes (1957) formulated the maximum entropy (MaxEnt) principle for statistical inference: among all probability distributions consistent with known constraints (expected values of observable features), choose the one of maximum Shannon entropy — the least-committal distribution given the data. Phil",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-maxent-invasive-species-prediction"
      ],
      "communication_gap": "Phillips et al. (2006) designed MaxEnt as a practical ecological tool without reference to Jaynes' information-theoretic formulation or exponential family statistics. The ecological SDM literature and the statistical exponential family literature developed independently for decades. The identity was recognised by Elith et al. (2011) and Renner & Warton (2013) but has not fully propagated to practitioners who still treat MaxEnt as a black box. Most ecologists using MaxEnt are unaware that they are fitting a Poisson point process model and that all standard diagnostic tools for Poisson regression apply to their results.\n",
      "translation_table": [
        {
          "field_a_term": "Species occurrence locations (presence-only data)",
          "field_b_term": "Observed data points from an inhomogeneous Poisson point process",
          "note": "Background samples in MaxEnt approximate the reference distribution of the point process"
        },
        {
          "field_a_term": "Environmental features f_k(x)",
          "field_b_term": "Sufficient statistics of the exponential family model",
          "note": "Linear, quadratic, and product features in MaxEnt correspond to mean, variance, and covariance constraints"
        },
        {
          "field_a_term": "MaxEnt distribution π(x)",
          "field_b_term": "Gibbs/Boltzmann distribution; canonical ensemble in statistical mechanics",
          "note": "Same mathematical form as the Boltzmann distribution in physics: π ∝ exp(-β E(x))"
        },
        {
          "field_a_term": "Lagrange multipliers λ_k",
          "field_b_term": "Regression coefficients in Poisson log-linear model",
          "note": "Estimated by iterative scaling (MaxEnt) or gradient ascent (logistic regression) — equivalent algorithms"
        },
        {
          "field_a_term": "Regularisation (L1/L2 penalties)",
          "field_b_term": "Prior distribution on regression coefficients (Laplace/Gaussian prior)",
          "note": "MaxEnt regularisation is MAP estimation in the Bayesian Poisson point process model"
        },
        {
          "field_a_term": "Habitat suitability score",
          "field_b_term": "Log-intensity of the Poisson point process",
          "note": "Proportional to the log-probability ratio relative to background; equivalent to logistic regression output"
        },
        {
          "field_a_term": "Climate change range shift projection",
          "field_b_term": "Out-of-sample prediction from a fitted exponential family model",
          "note": "Extrapolation risk: environmental values outside training range may violate linearity assumptions"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRev.106.620",
          "note": "Jaynes (1957) Phys Rev 106:620 — information theory and statistical mechanics; maximum entropy principle"
        },
        {
          "doi": "10.1016/j.ecolmodel.2005.03.026",
          "note": "Phillips, Anderson & Schapire (2006) Ecol Model 190:231 — MaxEnt for species distribution modelling"
        },
        {
          "doi": "10.1111/j.2041-210X.2011.00172.x",
          "note": "Elith et al. (2011) Methods Ecol Evol 2:433 — statistical explanation and calibration of MaxEnt as PPP"
        },
        {
          "doi": "10.1111/biom.12352",
          "note": "Renner & Warton (2013) Biometrics 69:274 — equivalence of MaxEnt and Poisson point process models"
        },
        {
          "doi": "10.1111/j.2041-210X.2012.00261.x",
          "note": "Fithian & Hastie (2013) Methods Ecol Evol — finite-sample equivalence of logistic regression and MaxEnt"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/ecology-statistics/b-species-distribution-maxent.yaml"
    },
    {
      "id": "b-ecosystem-metabolic-scaling",
      "title": "Ecosystem gross primary production scales with total biomass raised to the 3/4 power, reflecting the same thermodynamic constraints on transport networks that govern metabolic rate scaling in individual organisms",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The metabolic theory of ecology (MTE) predicts that individual metabolic rate B scales as M^(3/4) exp(-E/kT) due to fractal vascular network optimization, and this scaling propagates to ecosystem-level energy flux, setting carbon turnover rates, population densities, and food-web structure through a",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Ecologists measure ecosystem fluxes at the ecosystem level while thermodynamicists study energy dissipation at the molecular/organismal level; the MTE bridge between scales is known in macroecology but not widely taught in thermodynamics curricula.",
      "translation_table": [
        {
          "field_a_term": "individual metabolic rate B = B0 * M^(3/4)",
          "field_b_term": "Boltzmann-Arrhenius temperature correction exp(-E/kT)",
          "note": "3/4 exponent from fractal resource network; temperature term from enzyme kinetics; both enter the same MTE equation"
        },
        {
          "field_a_term": "ecosystem gross primary production (GPP)",
          "field_b_term": "sum of individual autotroph metabolic rates at ambient temperature",
          "note": "GPP integrates over the biomass distribution; its temperature sensitivity is ~0.65 eV matching the Arrhenius term"
        },
        {
          "field_a_term": "carbon turnover time",
          "field_b_term": "M^(1/4) exp(E/kT)",
          "note": "Slower turnover in larger organisms; faster in warmer environments; exact inverse of metabolic rate scaling"
        },
        {
          "field_a_term": "population energy use (population density times body mass times metabolic rate)",
          "field_b_term": "constant across body sizes within a trophic level",
          "note": "Damuth's law: energy equivalence across body sizes emerges from 3/4 scaling"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1095244",
          "note": "Brown et al. (2004) Science - Metabolic Theory of Ecology unifying body size, temperature, and metabolic rate"
        },
        {
          "doi": "10.1038/35098076",
          "note": "West et al. (1999) Science - fractal vascular network derivation of 3/4 scaling law"
        },
        {
          "doi": "10.1111/j.1461-0248.2007.01094.x",
          "note": "Allen & Gillooly (2007) Ecology Letters - temperature dependence of ecosystem processes via MTE"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-thermodynamics/b-ecosystem-metabolic-scaling.yaml"
    },
    {
      "id": "b-soil-carbon-microbial-thermodynamics",
      "title": "Soil carbon sequestration efficiency is governed by microbial thermodynamics: the carbon use efficiency (CUE) of soil microbes follows thermodynamic constraints on ATP yield per mole of carbon oxidized, bridging ecosystem ecology and bioenergetics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Microbial carbon use efficiency CUE = C_biomass / C_substrate_consumed is thermodynamically constrained by the Gibbs energy yield of the oxidation reaction (DeltaG_rxn per mole C); substrates with high energy yield per carbon (sugars) support higher CUE than recalcitrant compounds (lignin), and the ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-microbial-cue-warming-feedback-carbon-cycle"
      ],
      "communication_gap": "Soil ecologists measure carbon fluxes and microbial biomass while thermodynamicists analyze energy yields of chemical reactions; the MEND framework explicitly links these but most Earth System Models use empirical temperature-sensitivity parameters (Q10) rather than mechanistic thermodynamic constraints, limiting projection accuracy.\n",
      "translation_table": [
        {
          "field_a_term": "carbon use efficiency CUE (ecology)",
          "field_b_term": "thermodynamic efficiency of anabolic vs catabolic reactions (thermodynamics)",
          "note": "CUE is bounded by the fraction of Gibbs energy available for biosynthesis after maintenance"
        },
        {
          "field_a_term": "substrate quality (soil ecology)",
          "field_b_term": "Gibbs energy of oxidation DeltaG_rxn per mole carbon (thermodynamics)",
          "note": "High DeltaG substrates (sugars, acetate) have higher CUE than low DeltaG compounds (lignin)"
        },
        {
          "field_a_term": "soil organic matter decomposition rate (ecology)",
          "field_b_term": "enzyme kinetics coupled to thermodynamic feasibility (thermodynamics)",
          "note": "Decomposition ceases when DeltaG_rxn < threshold for ATP synthesis (~-20 kJ/mol)"
        },
        {
          "field_a_term": "microbial biomass turnover (ecology)",
          "field_b_term": "maintenance energy requirement (thermodynamics)",
          "note": "A minimum power flux is required to maintain cellular structure against entropy production"
        }
      ],
      "references": [
        {
          "doi": "10.1038/s41558-018-0271-9",
          "note": "Wieder et al. (2018) - explicitly representing microbial carbon use efficiency in Earth system models"
        },
        {
          "doi": "10.1111/j.1461-0248.2010.01538.x",
          "note": "Manzoni et al. (2012) - stoichiometric controls on carbon, nitrogen, and phosphorus dynamics in soil"
        },
        {
          "doi": "10.1016/j.soilbio.2013.08.013",
          "note": "Wang et al. (2013) - MEND model: Michaelis-Menten kinetics coupled to microbial thermodynamics"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-thermodynamics/b-soil-carbon-microbial-thermodynamics.yaml"
    },
    {
      "id": "b-wetland-carbon-storage-anaerobic-decomposition",
      "title": "Wetlands store disproportionate amounts of carbon because anaerobic conditions thermodynamically inhibit organic matter decomposition: without oxygen as the terminal electron acceptor, microbes must use energetically inferior redox couples, slowing carbon turnover and enabling peat accumulation over millennia.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Microbial decomposition thermodynamics are governed by the Gibbs free energy yield of terminal electron acceptor (TEA) reactions: ΔG°'(O₂) = -2870 kJ/mol glucose >> ΔG°'(NO₃⁻) = -2670 >> ΔG°'(Fe³⁺) = -1410 >> ΔG°'(SO₄²⁻) = -380 >> ΔG°'(CO₂→CH₄) = -390 kJ/mol. In waterlogged soils, O₂ is rapidly depl",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-permafrost-thaw-carbon-bomb-thermodynamic-tipping-point"
      ],
      "communication_gap": "Wetland ecologists measuring carbon stocks and biogeochemists modeling decomposition thermodynamics have partially overlapping communities, but quantitative application of thermodynamic free energy yields to predict peat accumulation under different climate scenarios remains a frontier; the microbial ecology, soil physics, and thermodynamics communities rarely model these processes jointly.\n",
      "translation_table": [
        {
          "field_a_term": "waterlogged anoxic soil (ecology)",
          "field_b_term": "low-redox thermodynamic environment (thermodynamics)",
          "note": "Soil Eh < -200 mV forces use of low-ΔG° electron acceptors, slowing decomposition"
        },
        {
          "field_a_term": "peat carbon accumulation (ecology)",
          "field_b_term": "thermodynamic inhibition of oxidation reactions (thermodynamics)",
          "note": "Carbon accumulates because anaerobic pathways are thermodynamically unfavorable"
        },
        {
          "field_a_term": "methanogenesis rate (ecology)",
          "field_b_term": "Gibbs energy yield of CO₂/CH₄ redox couple (thermodynamics)",
          "note": "Low ΔG° of methanogenesis limits microbial energy yield and thus decomposition rate"
        },
        {
          "field_a_term": "redox stratification in wetland soil profile (ecology)",
          "field_b_term": "sequential thermodynamic reduction ladder (thermodynamics)",
          "note": "TEAs are used in order of decreasing ΔG°, creating depth-stratified redox zones"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature04586",
          "note": "Bridgham et al. (2006) - the carbon balance of North American wetlands"
        },
        {
          "doi": "10.1016/j.soilbio.2007.01.008",
          "note": "Keller & Bridgham (2007) - methane production potential in peat soils and thermodynamics"
        },
        {
          "doi": "10.1890/1051-0761(2002)012[0863:TWCOTO]2.0.CO;2",
          "note": "Limpens et al. (2008) - peatlands and the carbon cycle review"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/ecology-thermodynamics/b-wetland-carbon-storage-anaerobic-decomposition.yaml"
    },
    {
      "id": "b-ellsberg-paradox-ambiguity-aversion",
      "title": "The Ellsberg paradox demonstrates that decision-makers prefer known-probability risks over unknown-probability ambiguity (ambiguity aversion), violating Savage's subjective expected utility axioms and requiring Choquet expected utility or maxmin expected utility theories that assign non-additive capacities to ambiguous events\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "In the Ellsberg urn experiment (30 red balls + 60 unknown black/yellow balls), most subjects prefer betting on red (known p=1/3) over black (unknown probability) in both direct and reversed conditions, violating the sure-thing principle; this ambiguity aversion is modeled by Choquet expected utility",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-maxmin-eu-ambiguity-aversion-amygdala"
      ],
      "communication_gap": "Behavioral economists study Ellsberg-type violations in laboratory choice tasks while cognitive scientists study uncertainty representation in neural circuits; the neurobiological basis of ambiguity aversion (how the brain represents and evaluates sets of possible probability distributions) is only beginning to be explored with neuroimaging.\n",
      "translation_table": [
        {
          "field_a_term": "Ellsberg urn paradox preference reversal (economics)",
          "field_b_term": "violation of Savage's P2 sure-thing principle / additive probability axiom (cognitive science)",
          "note": "Ellsberg preferences cannot be represented by any additive subjective probability measure over outcomes"
        },
        {
          "field_a_term": "ambiguity aversion / Ellsberg paradox (economics)",
          "field_b_term": "compound uncertainty aversion — preference for first-order over second-order uncertainty (cognitive science)",
          "note": "Ambiguity = unknown probability = compound lottery over possible probability distributions; aversion is stronger for compound uncertainty"
        },
        {
          "field_a_term": "Choquet capacity ν (non-additive) (economics)",
          "field_b_term": "subjective belief function encoding ambiguity where ν(A) + ν(A^c) ≤ 1 (cognitive science)",
          "note": "Subadditivity ν(A) + ν(A^c) < 1 captures ambiguity: the agent assigns less total probability than 1 to complement pairs"
        },
        {
          "field_a_term": "maxmin expected utility set C of priors (economics)",
          "field_b_term": "mental model of a set of possible probability distributions over outcomes (cognitive science)",
          "note": "Agent acts as if optimizing worst-case expected utility over a convex set C of possible probability models"
        }
      ],
      "references": [
        {
          "doi": "10.2307/1911131",
          "note": "Ellsberg (1961) - risk, ambiguity, and the Savage axioms"
        },
        {
          "doi": "10.2307/2938223",
          "note": "Gilboa & Schmeidler (1989) - maxmin expected utility with non-unique prior"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/economics-cognitive-science/b-ellsberg-paradox-ambiguity-aversion.yaml"
    },
    {
      "id": "b-prospect-theory-loss-aversion",
      "title": "Prospect theory formalizes cognitive loss aversion as an asymmetric S-shaped value function with probability weighting, bridging behavioral economics and the psychophysics of decision under uncertainty.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Kahneman and Tversky's prospect theory maps the cognitive phenomenon of loss aversion (losses loom approximately twice as large as equivalent gains) onto an asymmetric value function v(x) with v'(x) diminishing in both gains and losses, combined with a probability weighting function that overweights",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-prospect-theory-neural-value-coding"
      ],
      "communication_gap": "Economists model choice behavior mathematically while cognitive scientists study the underlying neural and computational mechanisms; the prospect theory framework is widely used in economics but rarely linked to its neurobiological implementation, which would allow predictions about when and why the model breaks down.\n",
      "translation_table": [
        {
          "field_a_term": "utility function U(x) (expected utility theory)",
          "field_b_term": "value function v(x) relative to reference point (prospect theory)",
          "note": "Prospect theory replaces absolute wealth utility with gain/loss relative to a reference point"
        },
        {
          "field_a_term": "loss aversion coefficient lambda ~ 2 (behavioral economics)",
          "field_b_term": "asymmetric response to negative vs positive stimuli (psychophysics)",
          "note": "Weber-Fechner law applied asymmetrically predicts steeper loss sensitivity"
        },
        {
          "field_a_term": "probability weighting function w(p)",
          "field_b_term": "subjective probability / cognitive probability distortion",
          "note": "Overweighting of rare events corresponds to availability heuristic in cognitive science"
        },
        {
          "field_a_term": "reference point dependence (economics)",
          "field_b_term": "perceptual adaptation / sensory baseline (cognitive neuroscience)",
          "note": "Neural adaptation sets a local reference; gains/losses are computed relative to it"
        }
      ],
      "references": [
        {
          "doi": "10.2307/1914185",
          "note": "Kahneman & Tversky (1979) Econometrica - original prospect theory paper"
        },
        {
          "doi": "10.1006/jets.1992.1042",
          "note": "Tversky & Kahneman (1992) - cumulative prospect theory"
        },
        {
          "doi": "10.1016/j.neuron.2009.09.016",
          "note": "Sokol-Hessner et al. (2009) - neural correlates of loss aversion in vmPFC"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/economics-cognitive-science/b-prospect-theory-loss-aversion.yaml"
    },
    {
      "id": "b-optimal-transport-x-machine-learning",
      "title": "Optimal transport ↔ Machine learning — Wasserstein distance as probability metric",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The Wasserstein distance (earth mover's distance) from optimal transport theory provides a geometrically meaningful metric on probability distributions that captures spatial structure; Wasserstein GANs use it as the training loss, and it is now the standard distance for generative modeling, domain a",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-optimal-transport-x-machine-learning"
      ],
      "communication_gap": "Optimal transport theory (Monge 1781, Kantorovich 1942) was developed in pure mathematics and operations research. Generative models (GANs) were developed in deep learning research (Goodfellow 2014). The connection was made by Arjovsky et al. (2017) in the WGAN paper, which introduced Wasserstein distance to the machine learning community — bridging a 200-year-old mathematical problem and a 3-year-old deep learning architecture.",
      "translation_table": [
        {
          "field_a_term": "Wasserstein-1 distance W₁(μ,ν) (earth mover's distance, OT)",
          "field_b_term": "WGAN critic loss (machine learning)",
          "note": "WGAN replaces JS divergence with W₁; training signal flows even when distributions have disjoint support"
        },
        {
          "field_a_term": "optimal transport plan γ* (joint distribution minimising expected cost)",
          "field_b_term": "data-generating coupling between real and generated distributions",
          "note": "The Monge-Kantorovich transport plan describes the optimal matching between real and fake samples"
        },
        {
          "field_a_term": "Kantorovich duality (W₁ = max_{‖f‖_Lip≤1} E_μ[f] - E_ν[f])",
          "field_b_term": "WGAN critic function f (1-Lipschitz neural network)",
          "note": "WGAN critic implements the Kantorovich dual via weight clipping or gradient penalty"
        },
        {
          "field_a_term": "Sinkhorn regularised OT (entropic regularisation with parameter ε)",
          "field_b_term": "Sinkhorn loss for differentiable OT in machine learning",
          "note": "Sinkhorn algorithm provides O(n²/ε²) approximation; now standard in differentiable OT"
        }
      ],
      "references": [
        {
          "doi": "10.48550/arXiv.1701.07875",
          "note": "Arjovsky, Chintala & Bottou (2017) — Wasserstein GAN; ICML 2017"
        },
        {
          "doi": "10.48550/arXiv.1306.0895",
          "note": "Cuturi (2013) — Sinkhorn distances: lightspeed computation of OT; NeurIPS 2013"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/economics-cs/b-optimal-transport-x-machine-learning.yaml"
    },
    {
      "id": "b-collective-risk-social-dilemma-x-insurance",
      "title": "Collective-risk dilemmas in evolutionary game theory — groups stochastically lose resources unless enough members contribute — mirror insurance and risk-pooling institutions in economics.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Evolutionary models of collective risk study cooperation under stochastic group loss: if total contributions fall below a threshold, everyone suffers with some probability. This resembles insurance contracts where premiums fund a pool that pays out after correlated shocks. Both settings feature nonl",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-risk-pooling-institutions-shift-evolutionary-stable-cooperation"
      ],
      "communication_gap": "Evolutionary biologists publish collective-risk games with abstract payoffs; economists study insurance markets with contracts. Crosswalks exist in theory reviews but less in empirical panels.",
      "translation_table": [
        {
          "field_a_term": "group loss probability",
          "field_b_term": "insured catastrophic event probability"
        },
        {
          "field_a_term": "cooperator contribution",
          "field_b_term": "premium payment / effort to maintain pool"
        },
        {
          "field_a_term": "evolutionarily stable strategy",
          "field_b_term": "Nash equilibrium under institutional rules"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.1011558108",
          "note": "Santos & Pacheco (2011) — collective-risk dilemmas and cooperation"
        },
        {
          "doi": "10.1038/s41598-017-08140-6",
          "note": "Representative follow-on collective-risk / threshold public goods (open access cluster)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/economics-ecology/b-collective-risk-social-dilemma-x-insurance.yaml"
    },
    {
      "id": "b-lotka-volterra-market-dynamics",
      "title": "Predator-prey (Lotka-Volterra) equations from theoretical ecology describe competitive dynamics in markets — incumbent firms vs. disruptive innovators, boom-bust cycles in commodity markets, and niche partitioning among competitors — with species coexistence mapping to Porter's competitive positioning and keystone predators mapping to market regulators.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The Lotka (1925) / Volterra (1926) equations for predator (y) and prey (x):\n  dx/dt = αx − βxy   (prey growth minus predation)\n  dy/dt = δxy − γy   (predator growth from prey minus mortality)\ngenerate closed oscillatory cycles in (x,y) phase space with period T ≈ 2π/√(αγ) and amplitude set by initia",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-lotka-volterra-semiconductor-capex-cycle"
      ],
      "communication_gap": "Economics textbooks treat competition with static equilibrium models (Cournot, Nash equilibrium); ecology uses dynamic differential-equation models. Econophysics (Farmer, Bouchaud, Mantegna) has imported LV dynamics into finance, but mainstream industrial economics and strategy research (Porter's tradition) rarely cites ecological dynamics literature. Biologists and economists do not typically co-author; the bridge runs almost entirely through the interdisciplinary complexity-economics community at SFI.\n",
      "translation_table": [
        {
          "field_a_term": "prey population x (ecology)",
          "field_b_term": "incumbent firm market share or commodity supply"
        },
        {
          "field_a_term": "predator population y (ecology)",
          "field_b_term": "disruptive entrant market share or demand shock"
        },
        {
          "field_a_term": "predation rate β",
          "field_b_term": "market displacement rate (innovator conversion of incumbent customers)"
        },
        {
          "field_a_term": "carrying capacity K (logistic)",
          "field_b_term": "total addressable market (TAM)"
        },
        {
          "field_a_term": "competitive exclusion (n ≤ resources)",
          "field_b_term": "Porter's competitive positioning (n ≤ market segments)"
        },
        {
          "field_a_term": "keystone predator",
          "field_b_term": "market regulator or dominant platform"
        },
        {
          "field_a_term": "trophic cascade",
          "field_b_term": "conglomerate merger reducing market diversity"
        },
        {
          "field_a_term": "GLV interaction matrix α_ij",
          "field_b_term": "market substitution/complementarity matrix"
        }
      ],
      "references": [
        {
          "url": "https://archive.org/details/elementsofphysic00lotk",
          "note": "Lotka (1925) Elements of Physical Biology — Williams & Wilkins, predator-prey equations"
        },
        {
          "doi": "10.2307/1622645",
          "note": "Volterra (1926) Fluctuations in the abundance of a species considered mathematically, Nature 118:558"
        },
        {
          "doi": "10.1038/460685a",
          "note": "Farmer & Foley (2009) The economy needs agent-based modelling, Nature 460:685"
        },
        {
          "doi": "10.1038/238413a0",
          "note": "May (1972) Will a large complex system be stable?, Nature 238:413"
        },
        {
          "doi": "10.1016/j.jedc.2004.08.008",
          "note": "Farmer et al. (2005) Is economics the next physical science?, J Econ Dyn Control"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/economics-ecology/b-lotka-volterra-market-dynamics.yaml"
    },
    {
      "id": "b-natural-capital-ecosystem-services",
      "title": "Ecosystem services (pollination, water purification, carbon sequestration, flood control) are natural capital whose economic value ($33–125 trillion/year) is systematically excluded from market prices — a Pigouvian externality that requires carbon/biodiversity credits or national natural capital accounting (UN SEEA) to internalize into welfare-maximizing decisions.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Ecology produces \"services\" — quantifiable flows of benefit to human welfare — that are economically analogous to any other factor of production (labor, physical capital). Costanza et al. (1997) estimated the global value of 17 ecosystem service categories at $33 trillion/year (revised to $125 trill",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ecosystem-services-pigouvian-subsidy-biodiversity-market"
      ],
      "communication_gap": "Costanza et al. (1997) in Nature sparked controversy partly because economists objected to valuing non-market goods and ecologists objected to reducing biodiversity to dollar values. This disciplinary discomfort has slowed adoption of natural capital accounting despite the mathematical consistency of the approach with standard welfare economics. Environmental economics journals (Journal of Environmental Economics and Management) and ecology journals (Ecological Applications) rarely cite each other despite working on the same empirical problems.\n",
      "translation_table": [
        {
          "field_a_term": "ecosystem services (pollination, water filtration)",
          "field_b_term": "factor inputs in production function Y = f(K, L, N)",
          "note": "Natural capital N is a third factor of production alongside physical (K) and human (L) capital"
        },
        {
          "field_a_term": "ecosystem service value (willingness-to-pay)",
          "field_b_term": "shadow price of non-market goods",
          "note": "Estimated via hedonic pricing, travel cost, or contingent valuation methods"
        },
        {
          "field_a_term": "habitat destruction externality",
          "field_b_term": "Pigouvian externality — private benefit exceeds social benefit",
          "note": "Optimal policy: Pigouvian tax on conversion = marginal ecosystem service value"
        },
        {
          "field_a_term": "biodiversity as natural capital stock",
          "field_b_term": "capital stock K in neoclassical growth model",
          "note": "Depletion below minimum viable population is analogous to running down capital to zero"
        },
        {
          "field_a_term": "Payment for Ecosystem Services (PES)",
          "field_b_term": "Pigouvian subsidy to align private and social returns",
          "note": "PES pays landowners the externality value they forgo by not converting habitat"
        },
        {
          "field_a_term": "UN SEEA ecosystem accounts",
          "field_b_term": "balance sheet entry for natural capital (alongside financial and physical capital)",
          "note": "Extends SNA (System of National Accounts) to include ecosystem assets"
        }
      ],
      "references": [
        {
          "doi": "10.1038/387253a0",
          "note": "Costanza et al. (1997) Nature 387:253 — global ecosystem services valuation"
        },
        {
          "note": "Daily, G.C. (ed.) (1997) Nature's Services. Island Press."
        },
        {
          "note": "Dasgupta, P. (2021) The Economics of Biodiversity. HM Treasury, UK Government."
        },
        {
          "note": "TEEB (2010) The Economics of Ecosystems and Biodiversity. UNEP."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/economics-ecology/b-natural-capital-ecosystem-services.yaml"
    },
    {
      "id": "b-price-elasticity-x-elastic-stiffness-tensor-analogy",
      "title": "Economic price elasticities quantify fractional demand/supply response ratios to relative price perturbations — mechanical stiffness tensors relate stress to strain as an anisotropic linear operator — formal Jacobian symmetry differs from elastic reciprocal relations except under restrictive coupled modeling assumptions; **the bridge is a cautious analogy between comparative statics slopes and moduli**, not identity of consumer theory with continuum mechanics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Own-price Marshallian elasticity behaves locally like a normalized slope linking percentage quantity change to percentage price change — linear elastic materials expose proportionality constants mapping strain vectors to stress tensors — thinking of markets as local responses to perturbations borrow",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-local-equilibrium-jacobian-best-conditioned-axis-aligns-with-principal-strain-demo-only"
      ],
      "communication_gap": "Economics emphasizes rational comparative statics under budget constraints; mechanics emphasizes tensor positivity and frame indifference — interdisciplinary texts rarely warn readers where metaphors must stop.\n",
      "translation_table": [
        {
          "field_a_term": "Own-price elasticity η = (p/q)(dq/dp)",
          "field_b_term": "Directional Young-type modulus along a fixed strain probe direction",
          "note": "Both describe incremental linearized response magnitudes — domains differ."
        },
        {
          "field_a_term": "Cross-price elasticities forming Jacobian blocks between goods",
          "field_b_term": "Coupled off-diagonal stiffness coefficients / shear coupling terms",
          "note": "Cross effects resemble coupling — Slutsky symmetry ≠ elastic reciprocal symmetry."
        },
        {
          "field_a_term": "Cobb–Douglas unitary income-share constraints (example systems)",
          "field_b_term": "Incompressible elasticity constraint Tr(ε)=0 analogies (informal only)",
          "note": "Useful metaphor risk; treat only as heuristic spark for differential geometry pedagogy."
        }
      ],
      "references": [
        {
          "doi": "10.2307/2171802",
          "note": "Berry, Levinsohn & Pakes (1995) — automobile prices and demand estimation (elasticities via discrete-choice inversion)"
        },
        {
          "doi": "10.1017/CBO9780511617794",
          "note": "Holzapfel (2000) — nonlinear solid mechanics foundations for stiffness tensors"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/economics-engineering/b-price-elasticity-x-elastic-stiffness-tensor-analogy.yaml"
    },
    {
      "id": "b-game-theoretic-vaccination-x-herd-immunity-threshold",
      "title": "Nash equilibria of voluntary vaccination games embed economic incentives (cost of vaccination versus infection risk) whose interior solutions relate to classical herd-immunity thresholds from mass-action SIR models — linking microeconomic strategic complements to macroscopic epidemiological critical vaccination coverage p_c = 1 − 1/R₀ when rational expectations incorporate prevalence feedback.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "When vaccine uptake is modeled as a multiplayer game with imitation dynamics or payoff-dependent adoption, equilibrium vaccine coverage often sits below social optima due to free riding — comparing equilibrium coverage with SIR-derived p_c exposes gaps between privately rational behavior and elimina",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-price-subsidy-closes-nash-herd-gap-in-agent-based-metapopulations"
      ],
      "communication_gap": "Mathematical epidemiology textbooks present R₀ algebra while microeconomics presents public-goods games — integrated behavioral epidemiology papers exist but policy briefs often cite only one lexicon.\n",
      "translation_table": [
        {
          "field_a_term": "Nash equilibrium vaccination probability under voluntary uptake",
          "field_b_term": "herd-immunity threshold derived from next-generation matrix eigenvalues",
          "note": "Equality holds only under restrictive symmetry and perfect information assumptions."
        },
        {
          "field_a_term": "externality from herd protection (nonrival infection-risk reduction)",
          "field_b_term": "positive spillovers in public goods provision (Samuelson condition)",
          "note": "Economic framing emphasizes incentives; epidemiology emphasizes threshold algebra."
        },
        {
          "field_a_term": "mixed-strategy equilibria with vaccine hesitancy segments",
          "field_b_term": "fractional critical immunity with heterogeneous susceptibility",
          "note": "Heterogeneity breaks crisp thresholds into distributions of critical coverage."
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.0406843101",
          "note": "Bauch & Earn (2004) — vaccination and the theory of games (PNAS)"
        },
        {
          "doi": "10.1098/rspb.2011.0812",
          "note": "Reluga, Bauch & Galvani (2011) — evolutionary game theory of vaccine uptake and population dynamics (Proc. R. Soc. B)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/economics-epidemiology/b-game-theoretic-vaccination-x-herd-immunity-threshold.yaml"
    },
    {
      "id": "b-signaling-theory-handicap-principle",
      "title": "Zahavi's handicap principle in evolutionary biology is the biological realization of Spence's job-market signaling model: costly signals are honest in evolutionary equilibrium because the signal cost C(t, q) is negatively correlated with quality q (single-crossing property), ensuring low-quality senders cannot profitably mimic high-quality senders",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Spence (1973) showed that costly educational signaling is honest in Nash equilibrium when the single-crossing property holds: d/dq[dC(t,q)/dt] < 0, meaning higher-ability workers face lower marginal cost for signaling; Zahavi (1975) proposed the same mechanism for animal ornaments — peacock tails, s",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Evolutionary biologists debate the mechanisms of honest signaling empirically through field studies of animal displays while economists model signaling games theoretically; the mathematical equivalence of the Zahavi handicap principle and Spence signaling equilibrium is recognized in theoretical evolutionary biology but rarely taught in economics courses on information economics.",
      "translation_table": [
        {
          "field_a_term": "handicap ornament / costly display (evolutionary biology)",
          "field_b_term": "costly education credential / job market signal (economics)",
          "note": "Both are equilibrium signals satisfying single-crossing: signal cost decreases with quality, preventing mimicry"
        },
        {
          "field_a_term": "evolutionary stable strategy (ESS) for honest signaling (evolutionary biology)",
          "field_b_term": "separating Nash equilibrium in signaling game (economics)",
          "note": "ESS corresponds to a separating equilibrium where each type chooses a unique signal level that is not profitably imitated"
        },
        {
          "field_a_term": "fitness cost of handicap ornament (evolutionary biology)",
          "field_b_term": "opportunity cost of education signal (economics)",
          "note": "Low-quality individuals incur disproportionate cost for the same signal, providing the incentive compatibility constraint"
        },
        {
          "field_a_term": "handicap principle: only the fit can afford the signal (evolutionary biology)",
          "field_b_term": "single-crossing property: dC/dt decreasing in quality q (economics)",
          "note": "Mathematical condition that guarantees honest equilibrium in both biology and economics"
        }
      ],
      "references": [
        {
          "doi": "10.1086/285895",
          "note": "Grafen (1990) - biological signals as handicaps: formal proof of honest signaling ESS"
        },
        {
          "doi": "10.2307/1882010",
          "note": "Spence (1973) - job market signaling: original economic signaling model"
        },
        {
          "doi": "10.1093/beheco/4.3.234",
          "note": "Zahavi (1975) - mate selection and the handicap principle: original proposal"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/economics-evolutionary-biology/b-signaling-theory-handicap-principle.yaml"
    },
    {
      "id": "b-efficient-markets-martingale",
      "title": "The Efficient Market Hypothesis (Fama 1970) — that asset prices reflect all available information — is the statement that price processes are martingales (E[P_{t+1}|F_t] = P_t); market anomalies are quantifiable as residual mutual information between price history and future returns.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Fama (1970) defined the Efficient Market Hypothesis (EMH): asset prices fully reflect all available information. Samuelson (1965) showed that this is mathematically equivalent to the statement that price processes are martingales: E[P_{t+1} | F_t] = P_t, where F_t is the filtration of all informatio",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-martingale-ecological-pricing"
      ],
      "communication_gap": "Fama (1970) and Samuelson (1965) wrote for finance journals without using information-theoretic language. Shannon's information theory was known to them but not operationalised in the EMH context. Cover & Thomas (2006) discuss the Kelly criterion and information theory but the explicit connection to market efficiency is not a central theme. Lo's Adaptive Market Hypothesis (2004) uses the information metaphor but not formal mutual information. Finance researchers use statistical tools (autocorrelation, variance ratio tests) rather than information-theoretic measures because the latter require large samples and non-parametric estimators that were computationally expensive until recently.\n",
      "translation_table": [
        {
          "field_a_term": "Efficient market (EMH, Fama 1970)",
          "field_b_term": "Zero-redundancy channel (no exploitable mutual information)",
          "note": "Efficient market = channel with zero capacity for systematic profit extraction"
        },
        {
          "field_a_term": "Martingale property E[P_{t+1}|F_t] = P_t",
          "field_b_term": "Zero mutual information I(P_{t+1}; P_t, P_{t-1}, ...) = 0",
          "note": "Mathematical equivalence: martingale ↔ unpredictability ↔ zero MI"
        },
        {
          "field_a_term": "Insider trading (private information advantage)",
          "field_b_term": "Side channel (higher-capacity channel available to privileged sender)",
          "note": "Insider has access to larger filtration F_t; strictly positive MI with future prices"
        },
        {
          "field_a_term": "Market anomaly (momentum, mean-reversion)",
          "field_b_term": "Residual mutual information I(future return; price history)",
          "note": "Anomalies quantify inefficiency as exploitable MI; should decay as arbitrageurs act"
        },
        {
          "field_a_term": "Kelly criterion (optimal bet size)",
          "field_b_term": "Channel capacity (maximum information extraction rate)",
          "note": "Kelly growth rate = MI between signal and outcome; zero in efficient market"
        },
        {
          "field_a_term": "Adaptive Market Hypothesis (Lo 2004)",
          "field_b_term": "Adaptive coding (channel capacity changes as agents learn)",
          "note": "Markets dynamically approach zero-redundancy as traders eliminate MI"
        }
      ],
      "references": [
        {
          "doi": "10.2307/2325486",
          "note": "Fama (1970) J Finance 25:383 — Efficient Capital Markets review"
        },
        {
          "doi": "10.1287/mnsc.11.6.B197",
          "note": "Samuelson (1965) Ind Mgmt Rev 6:41 — Proof that properly anticipated prices fluctuate randomly"
        },
        {
          "note": "Cover & Thomas (2006) Elements of Information Theory, 2nd ed. (Wiley) — Kelly criterion and information theory"
        },
        {
          "doi": "10.3905/jpm.2004.442611",
          "note": "Lo (2004) J Portfolio Mgmt 30:15 — Adaptive Market Hypothesis"
        },
        {
          "note": "Kelly (1956) Bell Syst Tech J 35:917 — A New Interpretation of Information Rate"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/economics-information/b-efficient-markets-martingale.yaml"
    },
    {
      "id": "b-causal-forest-x-policy-elasticity-heterogeneity",
      "title": "Causal-forest effect heterogeneity estimation bridges machine-learned treatment surfaces and policy elasticity targeting.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy (to be empirically validated): Causal forests can operationalize localized elasticity estimation similarly to structural policy analyses that segment populations by marginal response sensitivity.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-causal-forest-heterogeneity-improves-policy-targeting-efficiency"
      ],
      "communication_gap": "Domain operators prioritize interpretable reliability diagnostics, while ML work often prioritizes aggregate accuracy without deployment-grade uncertainty audits.",
      "translation_table": [
        {
          "field_a_term": "model prior",
          "field_b_term": "domain prior",
          "note": "Both constrain inference in data-sparse regimes."
        },
        {
          "field_a_term": "uncertainty estimate",
          "field_b_term": "risk-aware decision support",
          "note": "Uncertainty quality determines practical utility."
        },
        {
          "field_a_term": "out-of-distribution behavior",
          "field_b_term": "deployment robustness",
          "note": "Shift sensitivity governs real-world reliability."
        }
      ],
      "references": [
        {
          "url": "https://projecteuclid.org/journals/annals-of-statistics/volume-47/issue-2/Generalized-random-forests/10.1214/18-AOS1709.full",
          "note": "Generalized random forests."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/economics-machine-learning/b-causal-forest-x-policy-elasticity-heterogeneity.yaml"
    },
    {
      "id": "b-auction-design-x-complexity-theory",
      "title": "Auction Design x Computational Complexity - optimal auctions as NP-hard problems\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Computing the optimal (revenue-maximizing) mechanism for multi-item auctions with multiple bidders is NP-hard in general (Conitzer & Sandholm 2002); this hardness result explains why real-world auction design relies on simple heuristics (Vickrey, first-price) rather than optimal mechanisms - connect",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Mechanism design theory (Myerson, Maskin) and computational complexity (Cook, Karp) developed in economics and computer science respectively without cross-fertilization until the emergence of algorithmic mechanism design (Nisan, Ronen 1999), which unified them.\n",
      "translation_table": [
        {
          "field_a_term": "Revenue-maximizing mechanism (Myerson optimal auction)",
          "field_b_term": "NP-hard optimization problem",
          "note": "With one item and one bidder, Myerson's single-item optimal mechanism is computationally trivial; with k items and n bidders and correlated valuations, computing the optimal mechanism requires solving an exponentially large LP, which is NP-hard in k and n.\n"
        },
        {
          "field_a_term": "Bidder valuations (drawn from known distribution)",
          "field_b_term": "Input to optimization algorithm",
          "note": "The mechanism designer knows only the distribution from which valuations are drawn, not realized values; the optimal mechanism is computed from this distribution, analogous to offline algorithm design in complexity theory.\n"
        },
        {
          "field_a_term": "VCG mechanism (welfare-maximizing, strategyproof)",
          "field_b_term": "Polynomial-time approximation algorithm",
          "note": "VCG is polynomial-time computable and achieves social welfare maximization (not revenue maximization); it is the natural analogue of a greedy approximation algorithm that runs efficiently at the cost of suboptimal revenue.\n"
        },
        {
          "field_a_term": "Simple auction rules (posted price, sequential)",
          "field_b_term": "Approximation algorithm with constant factor guarantee",
          "note": "Posted-price mechanisms achieve constant-factor approximations to optimal revenue for independent bidders, analogous to FPTAS algorithms; the approximation ratio quantifies the revenue loss from computational tractability.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1145/501720.501721",
          "note": "Conitzer & Sandholm (2002) - complexity of mechanism design; ACM EC 2002"
        },
        {
          "doi": "10.1145/509907.509928",
          "note": "Nisan & Ronen (1999) - algorithmic mechanism design; STOC 1999"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/economics-math/b-auction-design-x-complexity-theory.yaml"
    },
    {
      "id": "b-arrow-impossibility-social-choice",
      "title": "Arrow's impossibility theorem proves mathematically that no social welfare function can simultaneously aggregate individual preferences into a consistent collective preference — making rational democratic aggregation provably impossible with ≥3 alternatives.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Arrow's impossibility theorem (1951) proves: any social welfare function on ≥3 alternatives satisfying unanimity (Pareto efficiency) and independence of irrelevant alternatives (IIA) must be dictatorial. This is a mathematical theorem with immediate implications for voting systems, multi-criteria de",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-arrow-impossibility-voting-nudges"
      ],
      "communication_gap": "Arrow's theorem is known to economists and political scientists but rarely taught in computer science programs where ranking algorithms (PageRank, collaborative filtering) routinely violate IIA without awareness that this is a consequence of Arrow impossibility. The AI alignment connection is recognised but underdeveloped.\n",
      "translation_table": [
        {
          "field_a_term": "social welfare function (aggregation rule)",
          "field_b_term": "voting system / ranking algorithm",
          "note": "Any deterministic aggregation of individual rankings is a social welfare function"
        },
        {
          "field_a_term": "independence of irrelevant alternatives (IIA)",
          "field_b_term": "consistency of pairwise comparison outcomes",
          "note": "Ranking of A vs B should not change if C is added or removed"
        },
        {
          "field_a_term": "dictatorship condition",
          "field_b_term": "single-agent decision (authority rule)",
          "note": "The only IIA-satisfying unanimous aggregation is to copy one agent's ranking exactly"
        },
        {
          "field_a_term": "Condorcet cycle (intransitivity)",
          "field_b_term": "inconsistent majority preference",
          "note": "A beats B, B beats C, C beats A — no majority winner; the source of Arrow impossibility"
        }
      ],
      "references": [
        {
          "note": "Arrow, K.J. (1951). Social Choice and Individual Values. Wiley."
        },
        {
          "doi": "10.2307/1914083",
          "note": "Gibbard, A. (1973). Manipulation of voting schemes: a general result. Econometrica 41:587."
        },
        {
          "doi": "10.1016/0022-0531(75)90050-2",
          "note": "Satterthwaite, M. (1975). Strategy-proofness and Arrow's conditions. J Econ Theory 10:187."
        },
        {
          "note": "Sen, A. (1970). Collective Choice and Social Welfare. Holden-Day."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/economics-mathematics/b-arrow-impossibility-social-choice.yaml"
    },
    {
      "id": "b-auction-theory-mechanism-design",
      "title": "The Vickrey-Clarke-Groves mechanism achieves the fundamental impossibility resolution in mechanism design — dominant-strategy truthfulness compatible with social welfare maximisation — while Myerson's optimal auction characterises revenue-maximising mechanisms via virtual value theory, unifying mathematical economics with computational allocation problems.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The central problem of mechanism design: how to aggregate private information (valuations, preferences) from self-interested agents into collective decisions (allocations, prices) without the agents having incentives to misreport. The Gibbard-Satterthwaite theorem (1973) states that the only dominan",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-vcg-regretnet-combinatorial-approximation"
      ],
      "communication_gap": "Auction theory emerged in mathematical economics (Vickrey 1961 in Journal of Finance, Clarke 1971 in Public Choice), while the generalisation to mechanism design was done at the intersection of economics and operations research. Computer science discovered algorithmic game theory in the late 1990s (Nisan, Roughgarden) and independently rediscovered many mechanism design results, creating a parallel literature with different terminology. Practitioners in finance, procurement, and advertising who run multi-billion dollar auctions daily rarely read the original VCG papers, and academic mechanism designers rarely engage with the engineering challenges of running auctions at scale.\n",
      "translation_table": [
        {
          "field_a_term": "agent type (private valuation vᵢ)",
          "field_b_term": "random variable in type space Θᵢ"
        },
        {
          "field_a_term": "allocation rule (who gets the good)",
          "field_b_term": "deterministic function x: Θ → X (allocation correspondence)"
        },
        {
          "field_a_term": "payment rule (how much each agent pays)",
          "field_b_term": "transfer function t: Θ → ℝ"
        },
        {
          "field_a_term": "dominant-strategy incentive compatibility (DSIC)",
          "field_b_term": "Nash equilibrium in dominant strategies (truth-telling)"
        },
        {
          "field_a_term": "virtual value φ(v) = v - (1-F(v))/f(v)",
          "field_b_term": "marginal revenue in optimal pricing theory (analogous structure)"
        },
        {
          "field_a_term": "VCG payment (negative externality internalisation)",
          "field_b_term": "Pigou tax (social cost internalisation in public economics)"
        },
        {
          "field_a_term": "revenue equivalence (all efficient auctions equal revenue)",
          "field_b_term": "Modigliani-Miller theorem (capital structure irrelevance under conditions)"
        }
      ],
      "references": [
        {
          "doi": "10.1111/j.1540-6261.1961.tb04130.x",
          "note": "Vickrey (1961) J Finance 16:8 — second-price auction, Vickrey mechanism"
        },
        {
          "note": "Clarke (1971) Public Choice 11:17 — Clarke tax (VCG generalisation to public goods)"
        },
        {
          "doi": "10.1287/moor.6.1.58",
          "note": "Myerson (1981) Math Oper Res 6:58 — optimal auction design, virtual values"
        },
        {
          "note": "Nisan et al. (2007) Algorithmic Game Theory. Cambridge University Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/economics-mathematics/b-auction-theory-mechanism-design.yaml"
    },
    {
      "id": "b-price-theory-walrasian-tatonnement",
      "title": "Walrasian tâtonnement is a price adjustment dynamical system whose convergence to competitive equilibrium is guaranteed by Lyapunov stability theory when all markets satisfy gross substitutability, providing rigorous mathematical foundations for general equilibrium price theory\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Walras's tâtonnement process (prices rise when excess demand > 0, fall when < 0) is a continuous-time ODE dp_i/dt = k_i * z_i(p) where z_i is the excess demand for good i; global convergence to Walrasian equilibrium (z_i = 0 for all i) is proven via the Lyapunov function V(p) = Σ z_i(p)^2 / 2, which",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-tatonnement-convergence-diagonal-dominance"
      ],
      "communication_gap": "Economists state tâtonnement convergence theorems verbally or with informal arguments while dynamical systems mathematicians apply Lyapunov theory routinely; the explicit Lyapunov function construction for tâtonnement is known in mathematical economics but rarely presented to economics students who learn general equilibrium theory.\n",
      "translation_table": [
        {
          "field_a_term": "Walrasian equilibrium price vector p* (economics)",
          "field_b_term": "fixed point of the tâtonnement ODE system (mathematics)",
          "note": "p* solves z(p*) = 0; tâtonnement is the gradient flow converging to this fixed point under GS"
        },
        {
          "field_a_term": "excess demand function z_i(p) (economics)",
          "field_b_term": "vector field of the price adjustment ODE (mathematics)",
          "note": "Excess demand defines the direction and speed of price adjustment; GS ensures z is a contraction"
        },
        {
          "field_a_term": "gross substitutability condition ∂z_i/∂p_j > 0 (economics)",
          "field_b_term": "sign condition on the Jacobian ensuring Lyapunov stability (mathematics)",
          "note": "GS is a sufficient condition for the Lyapunov function to be strictly decreasing along tâtonnement paths"
        },
        {
          "field_a_term": "market clearing (economics)",
          "field_b_term": "equilibrium of the ODE system (mathematics)",
          "note": "Market clearing z_i = 0 ∀i is the equilibrium of the tâtonnement dynamical system"
        }
      ],
      "references": [
        {
          "doi": "10.2307/1907353",
          "note": "Arrow & Hurwicz (1958) - on the stability of the competitive equilibrium"
        },
        {
          "doi": "10.1016/0304-4068(74)90024-5",
          "note": "Smale (1976) - a convergent process of price adjustment and global Newton methods"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/economics-mathematics/b-price-theory-walrasian-tatonnement.yaml"
    },
    {
      "id": "b-supply-chain-network-x-bond-percolation-disruption",
      "title": "Supply-chain risk analysts model firm–supplier edges failing under correlated shocks — resembling bond percolation on industrial networks where operational continuity requires giant connected components — enabling import of percolation thresholds, reliability polynomials, and network resilience metrics from discrete mathematics into operations research practice when modeling multi-tier disruptions.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Bond percolation retains edges with probability p — giant component emergence near p_c parallels systemic failure cascades when supplier edges drop below sustaining densities — stylized fact models trade realism for analytic thresholds akin to engineering reliability block diagrams. Multi-tier suppl",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-supply-chain-network-x-bond-percolation-disruption"
      ],
      "communication_gap": "MBA supply-chain syllabi emphasize Just-In-Time narratives over random-graph phase transitions; physics-trained network scientists publish percolation thresholds rarely calibrated on proprietary procurement graphs — leaving practitioner uptake uneven despite growing post-COVID interest.\n",
      "translation_table": [
        {
          "field_a_term": "Edge survival probability p after disruption draw",
          "field_b_term": "Bond occupation probability in percolation ensembles",
          "note": "Failure ensembles sampled empirically replace analytic IID assumptions when stress-tested historically."
        },
        {
          "field_a_term": "Giant connected component existence (functional supply network)",
          "field_b_term": "Percolating cluster spanning macroscopic lattice fraction",
          "note": "Operational KPI thresholds mimic finite-size scaling crossover widths near p_c."
        },
        {
          "field_a_term": "Redundant multisourcing (parallel suppliers)",
          "field_b_term": "Effective dimensionality / clustering coefficient boosting resilience vs tree-like risks",
          "note": "Loop-rich graphs shift thresholds analogous to correlated small-world modifications studied in network robustness literature."
        }
      ],
      "references": [
        {
          "doi": "10.1038/nphys3899",
          "note": "Bardoscia et al. (2017) Nature Physics — debt propagation network instability (finance network cascade exemplar cited for operator-style bridge metaphor)"
        },
        {
          "doi": "10.1103/RevModPhys.41.574",
          "note": "Essam (1972) — bond/site percolation theoretical thresholds baseline"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/economics-mathematics/b-supply-chain-network-x-bond-percolation-disruption.yaml"
    },
    {
      "id": "b-inequality-health-gradient",
      "title": "Economic inequality dynamics (Pareto income distribution, poverty-trap bifurcations, Gini coefficient) predict population health phase transitions — the Gini coefficient functions as a control parameter for health outcome distributions in the same way temperature controls Ising model phase transitions.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The relationship between economic inequality and population health is not linear — it exhibits threshold behavior consistent with a phase transition. At low Gini coefficients (high equality), mean income and public health spending are the dominant predictors of population health. Above a critical Gi",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-polarisation-ising-phase-transition",
        "h-norm-cascade-ising-ew"
      ],
      "communication_gap": "Health economics and statistical physics have no overlapping literature or conferences. Social epidemiology uses regression analysis and structural equation modeling; the idea that inequality-health relationships exhibit bifurcation structure has been discussed metaphorically (Wilkinson and Pickett) but never formalized with dynamical systems tools. Physicists studying social Ising models are unaware of the health-inequality literature; epidemiologists are unfamiliar with phase transition mathematics. Neither group routinely tests for discontinuous transitions or hysteresis in empirical data.\n",
      "translation_table": [
        {
          "field_a_term": "Gini coefficient",
          "field_b_term": "control parameter in health-inequality phase transition; proxy for social coupling strength"
        },
        {
          "field_a_term": "Pareto income distribution (power-law tail)",
          "field_b_term": "power-law mortality gradient across socioeconomic quintiles; heavy tail of preventable deaths"
        },
        {
          "field_a_term": "poverty trap (stable low-income equilibrium)",
          "field_b_term": "chronic disease trap — poor health reduces productivity, reducing income, worsening health"
        },
        {
          "field_a_term": "phase transition (ordered → disordered above T_c)",
          "field_b_term": "health inequality phase transition above critical Gini — social trust collapse, mortality gradient steepening"
        },
        {
          "field_a_term": "Ising model social coupling J",
          "field_b_term": "social capital (generalized trust, cooperative norms) mediating health through psychosocial pathways"
        },
        {
          "field_a_term": "order parameter (magnetization)",
          "field_b_term": "population health Gini (distribution of life expectancy across income deciles)"
        }
      ],
      "references": [
        {
          "doi": "10.1136/bmj.b1235",
          "note": "Wilkinson & Pickett (2009) — The Spirit Level; health outcomes vs inequality across nations"
        },
        {
          "doi": "10.1016/j.socscimed.2013.02.002",
          "note": "Pickett & Wilkinson (2015) — income inequality and health: a causal review"
        },
        {
          "doi": "10.1126/science.1126216",
          "note": "Marmot (2005) — Status syndrome; social gradient in health across all income levels"
        },
        {
          "doi": "10.1103/RevModPhys.81.591",
          "note": "Castellano et al. (2009) — statistical physics of social dynamics; Ising models of opinion spread"
        },
        {
          "doi": "10.1146/annurev.publhealth.012809.103521",
          "note": "Braveman et al. (2010) — social determinants of health; income gradient mechanisms"
        },
        {
          "doi": "10.1080/17421772.2011.552135",
          "note": "Azariadis & Stachurski (2005) — poverty traps; bifurcation structure of development traps"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/economics-medicine/b-inequality-health-gradient.yaml"
    },
    {
      "id": "b-trade-network-leontief-shock-propagation",
      "title": "The Leontief input-output model of inter-industry production is a weighted directed network whose spectral properties determine how supply shocks propagate across the global economy, making network percolation theory the natural language for systemic trade risk and macroeconomic fragility.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Leontief model represents the economy as a matrix A where A_ij = purchases by industry i from industry j per unit output. Total output vector x satisfies x = Ax + d (final demand d), solved as x = (I-A)^{-1} d (Leontief inverse). Each column of (I-A)^{-1} gives the full upstream supply chain req",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Economists apply Leontief models in industry and policy analysis without using network-theoretic tools (spectral analysis, percolation, community detection); network scientists rarely access detailed input-output tables. The shared mathematical object (weighted directed graph) is not yet standardly acknowledged.\n",
      "translation_table": [
        {
          "field_a_term": "Leontief input-output matrix A (economics)",
          "field_b_term": "weighted adjacency matrix of production network (network science)",
          "note": "A_ij is the fraction of sector i's input sourced from sector j"
        },
        {
          "field_a_term": "Leontief inverse (I-A)^{-1} (economics)",
          "field_b_term": "all-pairs weighted reachability matrix / Green's function (network science)",
          "note": "Encodes full upstream propagation of any sectoral shock"
        },
        {
          "field_a_term": "spectral radius rho(A) (economics)",
          "field_b_term": "percolation threshold / stability boundary (network science)",
          "note": "rho(A) < 1 required for convergence; proximity to 1 indicates fragility"
        },
        {
          "field_a_term": "systemic importance / upstream centrality (economics)",
          "field_b_term": "betweenness centrality / PageRank of production network (network science)",
          "note": "High-centrality sectors transmit shocks most broadly; identified by network topology"
        }
      ],
      "references": [
        {
          "doi": "10.2307/1928145",
          "note": "Leontief (1941) - The Structure of American Economy; original input-output model"
        },
        {
          "doi": "10.1146/annurev-economics-080213-041625",
          "note": "Acemoglu et al. (2016) - networks and the macroeconomy; Leontief as network"
        },
        {
          "doi": "10.1038/nature09014",
          "note": "Schweitzer et al. (2009) - economic networks; interdependence and fragility"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/economics-network-science/b-trade-network-leontief-shock-propagation.yaml"
    },
    {
      "id": "b-arrows-impossibility-quantum-contextuality",
      "title": "Arrow's impossibility theorem in social choice theory and the Kochen-Specker theorem in quantum mechanics are structurally identical no-go results: both prove the impossibility of a globally consistent classical assignment — social preference orderings and quantum observable values — when subjected to the same type of coherence constraints.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Arrow's impossibility theorem (1951) states that no social welfare function can simultaneously satisfy Pareto efficiency, independence of irrelevant alternatives (IIA), and non-dictatorship for three or more candidates. Formally: there is no map f: L(A)^n → L(A) (where L(A) is the set of preference ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-contextuality-mechanism-design-classification"
      ],
      "communication_gap": "Arrow's theorem is a foundational result in welfare economics and political science, studied in every graduate economics curriculum, but almost entirely without mathematical contact with quantum foundations. The Kochen-Specker theorem is a cornerstone of quantum logic and quantum information, but not taught in social science departments. The sheaf-theoretic unification by Abramsky et al. (2011-2012) exists in the computer science / quantum foundations literature but has had essentially zero uptake in economics or political science. The common obstacle is the unfamiliarity of sheaf cohomology in both communities.\n",
      "translation_table": [
        {
          "field_a_term": "Social welfare function f: L(A)^n → L(A)",
          "field_b_term": "Global value assignment v: O → ℝ on quantum observables",
          "note": "Both map local data (individual preferences / measurement contexts) to global structure"
        },
        {
          "field_a_term": "Independence of Irrelevant Alternatives (IIA)",
          "field_b_term": "Non-contextuality condition",
          "note": "The value/ranking of {A,B} must not depend on the presence of C in the measurement context / preference agenda"
        },
        {
          "field_a_term": "Pareto efficiency",
          "field_b_term": "Compatibility with QM probability predictions",
          "note": "The assignment must be consistent with the given data (votes / Born rule)"
        },
        {
          "field_a_term": "Non-dictatorship",
          "field_b_term": "No preferred measurement basis / non-signalling",
          "note": "No single agent / observable dominates the global assignment"
        },
        {
          "field_a_term": "Preference ordering over alternatives A",
          "field_b_term": "Eigenvalue assignment to observable in context C",
          "note": "Local consistency is always achievable; impossibility is at the global level"
        },
        {
          "field_a_term": "Global consistent preference order (global section)",
          "field_b_term": "Non-contextual hidden variable assignment (global section)",
          "note": "Sheaf-theoretic obstruction to existence is the same cohomological object"
        },
        {
          "field_a_term": "|A| ≥ 3 candidates required for impossibility",
          "field_b_term": "dim(H) ≥ 3 required for Kochen-Specker theorem",
          "note": "Below threshold, both results fail — two alternatives / qubit is always classically realisable"
        }
      ],
      "references": [
        {
          "note": "Arrow (1951) Social Choice and Individual Values — original impossibility theorem",
          "url": "https://doi.org/10.2307/2549741"
        },
        {
          "doi": "10.1016/j.tcs.2012.02.028",
          "note": "Abramsky & Brandenburger (2011) New Foundations for Contextuality — sheaf-theoretic unification of Arrow and KS"
        },
        {
          "doi": "10.1016/S0022-2496(86)80002-2",
          "note": "Kochen & Specker (1967) J Math Mech 17:59 — the problem of hidden variables in quantum mechanics"
        },
        {
          "arxiv": "1203.1184",
          "note": "Abramsky & Duncan (2012) — The sheaf-theoretic structure of non-locality and contextuality"
        },
        {
          "doi": "10.1017/S0305004100049052",
          "note": "Gleason (1957) Measures on the closed subspaces of Hilbert space — basis for KS in probabilistic form"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/economics-physics/b-arrows-impossibility-quantum-contextuality.yaml"
    },
    {
      "id": "b-contagion-models-x-financial-crises",
      "title": "Epidemic models on networks — thresholds for global spread driven by connectivity and transmissibility — reappear in models of financial contagion where defaults propagate via exposures and liquidity shocks.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Compartmental and network SIR-style models emphasize a reproduction number–like threshold: below critical connectivity or shock transmission probability, disturbances die out locally; above it, cascades can percolate. Financial stability models represent institutions as nodes with asset and liabilit",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-interbank-default-cascades-exhibit-epidemic-thresholds"
      ],
      "communication_gap": "Epidemiology and finance use different jargon and institutional detail; cross-citations exist but parameter identifiability from partial regulatory data is seldom shared methodology.",
      "translation_table": [
        {
          "field_a_term": "basic reproduction number R0",
          "field_b_term": "amplification factor in exposure cascades"
        },
        {
          "field_a_term": "recovery rate",
          "field_b_term": "recapitalization / resolution speed"
        },
        {
          "field_a_term": "network degree distribution",
          "field_b_term": "interbank exposure concentration"
        }
      ],
      "references": [
        {
          "doi": "10.1093/rfs/hhm043",
          "note": "Gai & Kapadia (2010) — contagion in financial networks"
        },
        {
          "doi": "10.1086/261595",
          "note": "Allen & Gale (2000) — financial contagion and network structure"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/economics-physics/b-contagion-models-x-financial-crises.yaml"
    },
    {
      "id": "b-dissipative-structures-economic-cycles",
      "title": "Economic systems are dissipative structures maintained far from thermodynamic equilibrium by continuous money and energy flows — Prigogine's theory of non-equilibrium self-organisation predicts that economic order (price patterns, business cycles, Kondratiev waves) emerges spontaneously from the thermodynamic irreversibility of economic transactions.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Prigogine & Stengers (1984) showed that non-equilibrium thermodynamic systems maintained far from equilibrium by continuous energy flux can spontaneously develop ordered spatial and temporal patterns (\"dissipative structures\") — the Belousov-Zhabotinsky reaction (chemical oscillations), Bénard conve",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-kondratiev-dissipative-entropy"
      ],
      "communication_gap": "Prigogine and Stengers (1984) explicitly proposed the economic application of dissipative structure theory, but mainstream economics dismissed it as imprecise metaphor. Georgescu-Roegen's entropic economics was influential in ecological economics but marginalised in mainstream economics departments. Nonlinear dynamics (Kaldor, Goodwin, Goodwin cycle) has a small home in heterodox economics but is absent from graduate macroeconomics curricula. The Kondratiev wave is documented empirically but has no accepted mechanistic explanation in mainstream economic theory. The mutual exclusion of mathematical physics and economics journals ensures these connections are rarely made explicitly.\n",
      "translation_table": [
        {
          "field_a_term": "Dissipative structure (Prigogine)",
          "field_b_term": "Economic market order — emergent price patterns, cycles, and institutions",
          "note": "Both maintained by continuous throughput (energy in physics; money, labor, resources in economics)"
        },
        {
          "field_a_term": "Free energy gradient (thermodynamic driving force)",
          "field_b_term": "Profit opportunity / price differential (economic driving force)",
          "note": "Flows of goods and labor move to equalise price differentials, as matter flows to equalise chemical potentials"
        },
        {
          "field_a_term": "Entropy production rate dS/dt",
          "field_b_term": "Economic throughput rate — physical resource degradation per unit time",
          "note": "Georgescu-Roegen's bioeconomics: GDP growth requires proportional entropy production (resource degradation)"
        },
        {
          "field_a_term": "Limit cycle (Hopf bifurcation)",
          "field_b_term": "Business cycle — periodic fluctuations in output, employment, investment",
          "note": "Kaldor (1940), Goodwin (1967) predator-prey business cycle; both exhibit Hopf bifurcations"
        },
        {
          "field_a_term": "Slow dissipative oscillation",
          "field_b_term": "Kondratiev wave — 45-60 year economic long wave",
          "note": "Driven by innovation-diffusion dynamics; timescale set by technological capital depreciation"
        },
        {
          "field_a_term": "Spontaneous symmetry breaking (pattern formation)",
          "field_b_term": "Market emergence — spontaneous price coordination in unplanned economies",
          "note": "Hayek's 'spontaneous order'; price system as decentralised computation discovering Pareto optima"
        },
        {
          "field_a_term": "Bifurcation parameter (control parameter)",
          "field_b_term": "Policy variable (interest rate, tax rate, aggregate demand)",
          "note": "Central bank interest rate modulates where the economy sits relative to the Hopf bifurcation"
        }
      ],
      "references": [
        {
          "note": "Prigogine & Stengers (1984) Order Out of Chaos (Bantam Books) — dissipative structures and self-organisation",
          "url": "https://www.simonandschuster.com/books/Order-Out-of-Chaos/Ilya-Prigogine/9780553343427"
        },
        {
          "note": "Kondratiev (1925) Voprosy Konjunktury 1:28 — long economic waves; empirical identification of ~50-year cycles",
          "url": "https://www.longwavegroup.com/archives/The_Long_Waves_in_Economic_Life.pdf"
        },
        {
          "note": "Georgescu-Roegen (1971) The Entropy Law and the Economic Process (Harvard UP) — entropy and irreversibility in economics",
          "url": "https://www.hup.harvard.edu/catalog.php?isbn=9780674281646"
        },
        {
          "doi": "10.2307/1906798",
          "note": "Kaldor (1940) Rev Econ Stud 7:14 — model of the trade cycle; Hopf bifurcation dynamics"
        },
        {
          "doi": "10.1017/CBO9781139523660",
          "note": "Goodwin (1967) — growth cycle; predator-prey macroeconomics; Lotka-Volterra business cycle"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/economics-physics/b-dissipative-structures-economic-cycles.yaml"
    },
    {
      "id": "b-financial-markets-nonequilibrium",
      "title": "Financial markets are paradigmatic non-equilibrium systems — price returns exhibit the inverse cubic law (alpha ~ 3 fat tails), volatility clustering maps to GARCH/Heston stochastic-volatility dynamics, the square-root market impact law is a non-equilibrium flow phenomenon, and the continuous double auction is a far-from-equilibrium steady state, making econophysics the application of non-equilibrium statistical mechanics to capital markets.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Financial markets violate equilibrium assumptions in ways that non-equilibrium statistical mechanics can describe quantitatively. The core bridge is between statistical physics of complex systems and the empirical \"stylised facts\" of financial returns.\nStylised fact 1 — Fat-tailed return distributio",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-inverse-cubic-law-agent-heterogeneity-mechanism"
      ],
      "communication_gap": "Econophysics emerged in the mid-1990s (Mantegna & Stanley 1995, Nature) when physicists trained in statistical mechanics began applying their methods to financial data. The reception from economists was mixed to hostile — economists have their own well-developed theory of financial markets (efficient market hypothesis, stochastic calculus, option pricing theory) and viewed physicists as naive about market mechanisms. Economists do not read Physical Review E; physicists do not read the Journal of Finance. The main bridge journals (Quantitative Finance, founded 2001) partially bridge this gap. Risk practitioners (quants) have absorbed much of the econophysics toolkit (random matrix theory, heavy-tail distributions, market impact modelling) without formal academic recognition of the cross-disciplinary transfer.\n",
      "translation_table": [
        {
          "field_a_term": "price return fat tail exponent alpha ~ 3 (inverse cubic law)",
          "field_b_term": "critical exponent of non-equilibrium stationary distribution",
          "note": "universal across markets, time scales, decades — treated as universality class in physics"
        },
        {
          "field_a_term": "volatility clustering (autocorrelation of |r_t|)",
          "field_b_term": "intermittency in turbulence (clustering of high-activity events)",
          "note": "same phenomenological structure; both described by multifractal cascades"
        },
        {
          "field_a_term": "GARCH(1,1) model (σ²_t = ω + α r²_{t-1} + β σ²_{t-1})",
          "field_b_term": "discrete-time approximation to Heston stochastic volatility diffusion",
          "note": "GARCH is the econometric tool; Heston is the physics-style continuous SDE"
        },
        {
          "field_a_term": "market impact ΔP ~ sqrt(Q/V) (square root law)",
          "field_b_term": "non-equilibrium order flow dynamics (concave impact from order book depth)",
          "note": "√Q law implies orders are not price-takers; market is far from equilibrium during large trades"
        },
        {
          "field_a_term": "bid-ask spread (minimum price increment)",
          "field_b_term": "energy gap in a driven lattice gas model of the order book",
          "note": "CDA microstructure models use driven diffusive system analogies"
        },
        {
          "field_a_term": "random matrix theory (Marchenko-Pastur distribution of noise eigenvalues)",
          "field_b_term": "separation of signal from noise in portfolio correlation matrices",
          "note": "physics random matrix theory directly applicable to financial covariance matrices"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevE.60.5305",
          "note": "Gopikrishnan et al. (1999) Phys Rev E 60:5305 — inverse cubic law for financial returns"
        },
        {
          "note": "Mantegna & Stanley (1999) Introduction to Econophysics. Cambridge University Press"
        },
        {
          "note": "Almgren et al. (2005) Risk 18:58 — direct estimation of market impact (square root law)"
        },
        {
          "note": "Bouchaud & Potters (2003) Theory of Financial Risk and Derivative Pricing, 2nd ed. Cambridge University Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/economics-physics/b-financial-markets-nonequilibrium.yaml"
    },
    {
      "id": "b-wealth-distribution-statistical-mechanics",
      "title": "The Boltzmann-Gibbs exponential wealth distribution arising from entropy maximization subject to wealth conservation is the economic analog of the Maxwell-Boltzmann energy distribution in statistical mechanics: mean wealth is the economic \"temperature,\" wealth exchanges are binary collisions, and the Lorenz curve is the cumulative distribution function of kinetic energy.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Dragulescu & Yakovenko (2000) demonstrated that if economic agents exchange wealth in random pairwise interactions conserving total wealth (analogous to elastic collisions conserving energy), the stationary distribution is the Boltzmann-Gibbs exponential P(w) ~ exp(-w/⟨w⟩), where ⟨w⟩ plays the role ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-pareto-exponent-growth-redistribution-ratio"
      ],
      "communication_gap": "Classical economics derives income distributions from optimization behavior (utility maximization, general equilibrium) and does not use entropy or statistical mechanics formalisms. The econophysics literature (mostly in European Physical Journal B, Physica A) has established the Boltzmann-Gibbs income distribution empirically and theoretically, but this work is largely unknown to mainstream economists who dismiss the lack of agent rationality assumptions as a theoretical weakness. The result is that a robust empirical regularity (exponential income distribution for the bottom 97%) and its predictive framework are not integrated into policy-relevant economic modeling.\n",
      "translation_table": [
        {
          "field_a_term": "Boltzmann factor exp(-E/kT) — probability of energy state E",
          "field_b_term": "Probability of wealth w: P(w) ~ exp(-w/⟨w⟩)",
          "note": "Identical functional form; derived from entropy maximization with conservation constraint"
        },
        {
          "field_a_term": "Temperature T = ⟨E⟩/k_B (average kinetic energy)",
          "field_b_term": "Mean wealth ⟨w⟩ — the economic 'temperature'",
          "note": "Higher T (higher average wealth) means wider distribution; inequality is not captured by T alone"
        },
        {
          "field_a_term": "Elastic binary collision conserving total kinetic energy",
          "field_b_term": "Two-agent wealth exchange transaction (trade, purchase, wage)",
          "note": "The stochastic exchange rule determines the stationary distribution"
        },
        {
          "field_a_term": "Fermi-Dirac distribution (quantum statistics, exclusion principle)",
          "field_b_term": "Wealth distribution with minimum income constraint (social floor)",
          "note": "Fermion exclusion corresponds to a lower bound on allowable wealth states"
        },
        {
          "field_a_term": "Bose-Einstein condensation (macroscopic occupation of ground state)",
          "field_b_term": "Extreme wealth concentration: one agent captures finite fraction of total wealth",
          "note": "Predicted by Bouchaud-Mezard when growth rate >> redistribution rate"
        },
        {
          "field_a_term": "Entropy S = -Σ p_i ln p_i (Boltzmann/Shannon)",
          "field_b_term": "Negative Gini coefficient — measure of distributional evenness",
          "note": "Maximum entropy corresponds to minimum Gini (most equal distribution for given mean)"
        },
        {
          "field_a_term": "Maxwell-Boltzmann CDF (cumulative distribution of energies)",
          "field_b_term": "Lorenz curve (cumulative fraction of wealth held by poorest x% of population)",
          "note": "The Lorenz curve is the economic Boltzmann CDF; both derived from the same exponential distribution"
        }
      ],
      "references": [
        {
          "doi": "10.1140/epjb/e2001-00122-9",
          "note": "Dragulescu & Yakovenko (2000) Eur Phys J B — \"Statistical mechanics of money\"; derives Boltzmann-Gibbs distribution for income; empirical validation with UK/US data\n"
        },
        {
          "doi": "10.1140/epjb/e2001-00113-x",
          "note": "Chakraborti & Chakrabarti (2000) Eur Phys J B — \"Statistical mechanics of money: effects of saving propensity\"; shows savings propensity heterogeneity generates Pareto tail\n"
        },
        {
          "doi": "10.1016/S0378-4371(00)00205-3",
          "note": "Bouchaud & Mezard (2000) Physica A — \"Wealth condensation in a simple model of economy\"; multiplicative growth vs. redistribution determines Pareto exponent; predicts wealth condensation\n"
        },
        {
          "doi": "10.1016/j.physrep.2014.11.001",
          "note": "Yakovenko & Rosser (2009) Rev Mod Phys — \"Colloquium: Statistical mechanics of money, wealth, and income\"; comprehensive review of the field with empirical data from 70+ countries\n"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/economics-physics/b-wealth-distribution-statistical-mechanics.yaml"
    },
    {
      "id": "b-strategic-voting-mechanism-design-arrows-theorem",
      "title": "Strategic voting and electoral manipulation are analyzed by mechanism design theory and Arrow's impossibility theorem, connecting political science to mathematical social choice theory and game theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Arrow's impossibility theorem proves that no rank-order voting rule satisfies unrestricted domain, Pareto efficiency, independence of irrelevant alternatives, and non-dictatorship simultaneously. The Gibbard-Satterthwaite theorem extends this: every non-dictatorial voting rule with ≥3 alternatives i",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-quadratic-voting-reduces-majority-tyranny"
      ],
      "communication_gap": "Political scientists studying electoral behavior and mathematical economists studying mechanism design publish in separate literatures; Arrow's theorem is widely cited in political theory but its mathematical implications (Gibbard-Satterthwaite, VCG) are rarely taught in political science programs.\n",
      "translation_table": [
        {
          "field_a_term": "voter preference ranking (political science)",
          "field_b_term": "utility function / type report in mechanism design (economics)",
          "note": "Ordinal rankings are the political analogue of utility types in revelation principle"
        },
        {
          "field_a_term": "strategic vote (vote for less-preferred candidate to block worst outcome) (political science)",
          "field_b_term": "misrepresentation / non-truthful strategy in mechanism (economics)",
          "note": "Gibbard-Satterthwaite guarantees that such manipulation is always profitable somewhere"
        },
        {
          "field_a_term": "Condorcet winner (beats all others pairwise) (political science)",
          "field_b_term": "Pareto-dominant social choice (economics/mathematics)",
          "note": "Arrow's IIA condition is precisely the axiom that would select Condorcet winners"
        },
        {
          "field_a_term": "electoral system design (political science)",
          "field_b_term": "mechanism design / revelation principle (economics)",
          "note": "Choosing a voting rule = choosing a social choice mechanism; VCG is the benchmark"
        }
      ],
      "references": [
        {
          "doi": "10.2307/2999600",
          "note": "Arrow (1950) — impossibility theorem for social welfare functions"
        },
        {
          "doi": "10.2307/1914083",
          "note": "Gibbard (1973) — manipulation of voting schemes"
        },
        {
          "doi": "10.1016/0022-0531(73)90050-1",
          "note": "Vickrey-Clarke-Groves mechanism — incentive-compatible public goods allocation"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/economics-social-science/b-strategic-voting-mechanism-design-arrows-theorem.yaml"
    },
    {
      "id": "b-causal-inference-instrumental-variables",
      "title": "Causal inference in economics and epidemiology reduces to the potential outcomes framework (Rubin 1974), where instrumental variables (IV), regression discontinuity (RD), and difference-in-differences (DiD) estimators are all special cases of local average treatment effects (LATE) identified by exploiting quasi-random variation — formally equivalent to randomized controlled trials in specific subpopulations.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The fundamental problem of causal inference (Holland 1986): for any unit i, we observe only Y_i(1) or Y_i(0) (potential outcomes under treatment/control), never both. The average treatment effect ATE = E[Y(1) - Y(0)] requires strong assumptions for identification from observational data. The potenti",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-iv-late-external-validity-population-representativeness"
      ],
      "communication_gap": "Statisticians developed potential outcomes (Rubin, Neyman) largely independently of econometricians developing IV and DiD methods; Pearl's graphical framework developed independently in computer science and AI. Despite formal equivalence, the communities use different notation, assumptions, and terminology, creating significant communication barriers. Epidemiology, economics, and statistics all teach causal inference differently. The Nobel Prize to Angrist-Imbens-Card (2021) has improved cross-field awareness of IV/DiD, but Pearl's do-calculus remains largely unfamiliar to most econometricians.\n",
      "translation_table": [
        {
          "field_a_term": "potential outcomes Y_i(1), Y_i(0) (Rubin)",
          "field_b_term": "counterfactual states of the world in causal graphical models (Pearl)",
          "note": "Formally equivalent for causal identification; Rubin favors design-based, Pearl favors structural"
        },
        {
          "field_a_term": "instrument Z (exogenous + exclusion restriction + relevance)",
          "field_b_term": "exogenous variation in a directed acyclic graph (DAG) — node with no incoming causal paths",
          "note": "Exclusion restriction = Z → Y path only through D; DAG formalizes this as no direct Z → Y edge"
        },
        {
          "field_a_term": "local average treatment effect (LATE)",
          "field_b_term": "treatment effect identified in the subpopulation of compliers",
          "note": "LATE is not ATE; IV estimates for compliers only — external validity requires complier analysis"
        },
        {
          "field_a_term": "parallel trends assumption (DiD)",
          "field_b_term": "counterfactual untreated outcome for treated group equals control group trend",
          "note": "Testable in pre-period; violated when treatment is selected on anticipatory trends"
        },
        {
          "field_a_term": "regression discontinuity bandwidth h → 0",
          "field_b_term": "local randomization assumption — units near threshold are as-if randomly assigned",
          "note": "Optimal bandwidth trades bias and variance; MSE-optimal bandwidth from Imbens-Kalyanaraman formula"
        }
      ],
      "references": [
        {
          "note": "Rubin (1974) Estimating causal effects of treatments in randomized and nonrandomized studies. J Educ Psychol 66:688"
        },
        {
          "doi": "10.2307/2951620",
          "note": "Angrist, Imbens & Rubin (1996) Identification of causal effects using instrumental variables. J Am Stat Assoc 91:444"
        },
        {
          "doi": "10.1017/CBO9780511803161",
          "note": "Pearl (2000) Causality: Models, Reasoning, and Inference. Cambridge University Press"
        },
        {
          "doi": "10.3982/ECTA6551",
          "note": "Imbens & Wooldridge (2009) Recent developments in the econometrics of program evaluation. J Econ Lit 47:5"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/economics-statistics/b-causal-inference-instrumental-variables.yaml"
    },
    {
      "id": "b-graph-signal-processing-x-power-grid-pmu-anomaly-localization",
      "title": "Graph signal processing bridges spectral filtering theory and PMU-based power-grid anomaly localization.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: PMU streams are graph signals on transmission topology, so graph-wavelet energy can isolate localized disturbances faster than nodewise threshold alarms.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-graph-wavelet-energy-localizes-pmu-grid-disturbances-better-than-scada"
      ],
      "communication_gap": "Communities use different terminology and validation conventions, masking transferable method equivalence.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1109/TSP.2013.2279681",
          "note": "Graph signal processing foundations."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/electrical-engineering-computer-science/b-graph-signal-processing-x-power-grid-pmu-anomaly-localization.yaml"
    },
    {
      "id": "b-kuramoto-synchrony-x-beta-cell-islet-oscillations",
      "title": "Kuramoto-style phase synchrony formalism links power-grid stability tools with pancreatic beta-cell islet oscillations.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Kuramoto-style phase synchrony formalism links power-grid stability tools with pancreatic beta-cell islet oscillations.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-grid-inspired-phase-coherence-metrics-predict-beta-cell-dysfunction-earlier"
      ],
      "communication_gap": "The two communities use different notation, benchmarks, and publication venues, which obscures transferable mathematical structure.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1152/ajpendo.1998.275.6.E1119",
          "note": "Beta-cell oscillatory coordination."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/electrical-engineering-systems-biology/b-kuramoto-synchrony-x-beta-cell-islet-oscillations.yaml"
    },
    {
      "id": "b-catalyst-sabatier-principle",
      "title": "The Sabatier principle (volcano plot) bridges electrochemistry and materials science: optimal catalysts bind reaction intermediates with intermediate strength, and DFT computes binding energies from electronic structure to guide catalyst design.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Sabatier principle states that the optimal catalyst for a reaction binds intermediates neither too strongly (reactants cannot desorb → catalyst poisoned) nor too weakly (reactants cannot adsorb → no reaction). Plotting catalytic activity against binding energy gives a \"volcano plot\" with optimal",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-dual-site-catalyst-breaks-oer-scaling"
      ],
      "communication_gap": "Surface science and electrochemistry overlap but differ in experimental conditions (ultra-high vacuum vs. aqueous electrolyte), making it difficult to directly compare surface-science binding energies with electrochemical overpotentials. The computational bridge (DFT + implicit solvation + electrode potential correction) spans this gap but is not universally applied by experimentalists.\n",
      "translation_table": [
        {
          "field_a_term": "d-band center εd (Hammer-Nørskov model)",
          "field_b_term": "intermediate binding energy ΔG* (electrochemical)",
          "note": "Higher d-band center → stronger adsorbate binding (d-band model)"
        },
        {
          "field_a_term": "OER overpotential η = E_applied − E_thermodynamic",
          "field_b_term": "thermodynamic free energy of limiting step ΔG_max − 1.23 eV",
          "note": "Overpotential = extra voltage above thermodynamic minimum"
        },
        {
          "field_a_term": "DFT adsorption energy ΔE_ads",
          "field_b_term": "electrochemical free energy ΔG* = ΔE_ads + ZPE − TΔS + eU",
          "note": "DFT energies corrected for zero-point energy, entropy, and electrode potential"
        },
        {
          "field_a_term": "volcano plot peak (optimal binding)",
          "field_b_term": "Sabatier optimum — maximum turnover frequency",
          "note": "RuO₂ and IrO₂ sit near OER volcano peak; earth-abundant alternatives needed"
        },
        {
          "field_a_term": "scaling relation ΔG_OOH* − ΔG_OH* ≈ 3.2 eV (universal)",
          "field_b_term": "theoretical minimum OER overpotential ≈ 0.4 V",
          "note": "This scaling relation is a thermodynamic constraint on all oxide catalysts"
        }
      ],
      "references": [
        {
          "note": "Sabatier (1911) Hydrogénations et déshydrogénations par catalyse. Ber Dtsch Chem Ges 44:1984-2001"
        },
        {
          "note": "Nørskov et al. (2004) Origin of the overpotential for oxygen reduction at a fuel-cell cathode. J Phys Chem B 108:17886-17892"
        },
        {
          "note": "Man et al. (2011) Universality in oxygen evolution electrocatalysis on oxide surfaces. ChemCatChem 3:1159-1165"
        },
        {
          "note": "Seh et al. (2017) Combining theory and experiment in electrocatalysis: insights into materials design. Science 355:eaad4998"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/electrochemistry-materials-science/b-catalyst-sabatier-principle.yaml"
    },
    {
      "id": "b-nonhelical-landauer-reversible-em",
      "title": "Non-helical cavity resonators ↔ Landauer-limited reversible electromagnetic computation and memory (speculative engineering bridge)\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Non-helical (e.g. bifilar or meander) resonators in shielded cavities can reduce stray coupling and support high-Q modes that are attractive substrates for adiabatic or logically reversible manipulation of classical electromagnetic state variables (phase, amplitude, frequency). This connects natural",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-nonhelical-resonator-adiabatic-quantum-memory"
      ],
      "communication_gap": "Thermodynamics of computation is taught in CS and physics, but electromagnetic engineering curricula rarely connect reversible gate sets to cavity-QED-style hardware; conversely, RF engineers seldom frame measurements in bits erased per protocol. Non-helical geometries add niche fabrication vocabulary that sits outside both communities’ shared examples (CMOS, transmons).\n",
      "translation_table": [
        {
          "field_a_term": "Landauer erasure cost k_B T ln(2) per bit",
          "field_b_term": "Minimum heat associated with irreversible reset of a classical EM memory degree of freedom",
          "note": "Applies when a distinguishable state is merged or discarded without reversible embedding."
        },
        {
          "field_a_term": "Logically irreversible gate (e.g. erasure of ambiguity)",
          "field_b_term": "Operation that maps distinct resonator states onto one output without redundant DOF",
          "note": "Must budget at least Landauer-scale dissipation in principle."
        },
        {
          "field_a_term": "Logically reversible gate (Toffoli, Fredkin, reversible embedding)",
          "field_b_term": "Unitary or bijective evolution on an expanded state space including ancillas",
          "note": "No fundamental k_B T ln(2) floor per step if driven adiabatically in ideal hardware."
        },
        {
          "field_a_term": "Second law / mutual information",
          "field_b_term": "Entropy of hidden variables in lossy lines, amplifiers, or unmodeled radiation",
          "note": "Real circuits dissipate via channels outside the ideal reversible abstraction."
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature10872",
          "note": "Bérut et al. (2012) — experimental Landauer-related validation philosophy for small systems."
        },
        {
          "doi": "10.1147/rd.53.0183",
          "note": "Landauer (1961) — minimum heat generation tied to logically irreversible operations."
        },
        {
          "doi": "10.1147/rd.173.0525",
          "note": "Bennett (1973) — reversible computation and relation to dissipation."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/electromagnetism-computer-science/b-nonhelical-landauer-reversible-em.yaml"
    },
    {
      "id": "b-maxwell-equations-wave-encoding",
      "title": "Maxwell's equations in free space predict plane wave solutions with the same mathematical form as carrier waves in communications — the electromagnetic spectrum is a physical implementation of Shannon's abstract channel model.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Maxwell's equations in free space admit plane wave solutions of the form E = E₀ exp(i(k·r − ωt)), which are identical in mathematical structure to the carrier waves used in all radio, microwave, and optical communications. Shannon's channel capacity theorem C = B log₂(1 + S/N) gives the theoretical ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-maxwell-wave-channel-capacity-limit"
      ],
      "communication_gap": "Electrical engineering splits into \"fields and waves\" (Maxwell) and \"communications and signal processing\" (Shannon) in most curricula, creating practitioners who specialize in one but rarely both. The unification is implicit in the best wireless systems design but rarely stated as a foundational principle.\n",
      "translation_table": [
        {
          "field_a_term": "plane wave E = E₀ exp(i(k·r − ωt))",
          "field_b_term": "sinusoidal carrier wave at frequency f = ω/2π",
          "note": "The Maxwell wave solution is the physical carrier of information"
        },
        {
          "field_a_term": "electromagnetic bandwidth B (Hz)",
          "field_b_term": "channel bandwidth B in Shannon formula C = B log₂(1 + S/N)",
          "note": "Same quantity — frequency range available for modulation"
        },
        {
          "field_a_term": "signal power S / noise power N (Maxwell wave)",
          "field_b_term": "signal-to-noise ratio S/N in Shannon capacity",
          "note": "Maxwell propagation loss and Johnson-Nyquist noise set the S/N"
        },
        {
          "field_a_term": "polarization states of E-field (2 orthogonal)",
          "field_b_term": "2 independent Shannon channels (polarization multiplexing)",
          "note": "MIMO and polarization-division multiplexing exploit this directly"
        },
        {
          "field_a_term": "wave dispersion relation ω(k)",
          "field_b_term": "group delay and intersymbol interference in digital link",
          "note": "Dispersive channels require equalization to approach Shannon capacity"
        }
      ],
      "references": [
        {
          "note": "Maxwell (1865) A dynamical theory of the electromagnetic field. Phil Trans R Soc 155:459-512"
        },
        {
          "note": "Shannon (1948) A mathematical theory of communication. Bell Syst Tech J 27:379-423, 623-656"
        },
        {
          "note": "Nyquist (1924) Certain factors affecting telegraph speed. AIEE Trans 43:412-422"
        },
        {
          "note": "Proakis & Salehi (2008) Digital Communications, 5th ed. McGraw-Hill"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/electromagnetism-information-theory/b-maxwell-equations-wave-encoding.yaml"
    },
    {
      "id": "b-bound-states-continuum-x-dielectric-metasurface-q",
      "title": "Bound states in the continuum (BIC) theory explains ultra-high-Q dielectric metasurface resonances and their sensitivity to fabrication disorder.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Symmetry-protected and accidental BIC concepts predict when radiative channels decouple, creating quasi-BIC resonances with very high quality factors in dielectric metasurfaces. This bridges scattering-matrix singularity theory to practical sensor and filter design limits under disorder.\n",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-bic-protected-metasurfaces-maintain-high-q-under-fabrication-noise"
      ],
      "communication_gap": "BIC theory papers often emphasize idealized infinite periodic structures, while engineering teams face finite-size, roughness, and process-variation constraints.\n",
      "translation_table": [
        {
          "field_a_term": "BIC symmetry protection",
          "field_b_term": "suppressed radiation loss and enhanced Q",
          "note": "Small symmetry breaking turns ideal BIC into tunable quasi-BIC resonances."
        },
        {
          "field_a_term": "coupling coefficient to continuum channels",
          "field_b_term": "linewidth control in metasurface resonators",
          "note": "Fabrication asymmetry sets residual radiative leakage floor."
        },
        {
          "field_a_term": "modal overlap and perturbation theory",
          "field_b_term": "sensitivity-yield tradeoff in nanofabricated devices",
          "note": "Higher Q increases sensitivity but tightens tolerance requirements."
        }
      ],
      "references": [
        {
          "doi": "10.1038/s41566-018-0314-4",
          "note": "Review and perspective on BIC-enabled nanophotonic and metasurface resonances."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/electromagnetism-metamaterials/b-bound-states-continuum-x-dielectric-metasurface-q.yaml"
    },
    {
      "id": "b-epsilon-near-zero-metamaterial-x-field-confinement-quality-factor",
      "title": "Metamaterials engineered near an epsilon-near-zero (ENZ) permittivity crossover concentrate electromagnetic fields and reshape resonance quality factors because dispersion-dominated response modifies radiative and absorptive loss partitioning — nanophotonics ↔ cavity Q engineering distinct from helical chiral designs.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Near an ENZ frequency ω_ENZ where Re ε(ω)→0, Maxwell boundary problems exhibit compressed wavelengths and enhanced local density of electromagnetic states in thin films and waveguides. High-Q resonances can emerge when material absorption Im ε is small relative to the curvature of ε(ω) (dispersion e",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-enz-crossover-curvature-predicts-local-q-maximum-thin-film-cavity"
      ],
      "communication_gap": "RF coil engineers rarely import nanophotonic ENZ language; optical metamaterials papers seldom cite power-frequency MHz coil optimization — scales differ but dispersion-shaped Q limits share mathematics.\n",
      "translation_table": [
        {
          "field_a_term": "ENZ condition Re ε(ω) → 0",
          "field_b_term": "region of extreme field enhancement / spatial compression in nanostructures",
          "note": "Requires dispersion so group velocity is physically meaningful; ε = 0 alone is singular without loss model."
        },
        {
          "field_a_term": "material dispersion d(Re ε)/dω",
          "field_b_term": "modal linewidth sensitivity via frequency pulling near crossover",
          "note": "Links absorption peaks to Q limits distinct from geometric mode volume alone."
        },
        {
          "field_a_term": "Purcell / LDOS reasoning at ENZ points",
          "field_b_term": "modified spontaneous emission / resonance linewidth scaling in cavity QED analog platforms",
          "note": "Classical analogies only unless quantum emitters are embedded."
        }
      ],
      "references": [
        {
          "doi": "10.1038/nphoton.2016.293",
          "note": "Liberal & Engheta (2016) — epsilon-near-zero optoelectronics (Nat. Photon.)."
        },
        {
          "doi": "10.1103/PhysRevLett.102.113901",
          "note": "Edwards, Silveirinha & Engheta (2009) — tunneling with epsilon-near-zero materials (Phys. Rev. Lett.)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/electromagnetism-metamaterials/b-epsilon-near-zero-metamaterial-x-field-confinement-quality-factor.yaml"
    },
    {
      "id": "b-fano-asymmetric-lineshape-x-metamaterial-dark-mode-quality-factor",
      "title": "Fano interference between broad radiative modes and narrow quasi-dark modes produces asymmetric scattering lineshapes with sharp linewidth features — the same spectral mathematics elevates effective Q and tailors metamaterial resonances without relying on helical geometry (nanophotonics ↔ metamaterials).\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Coupled oscillator models show asymmetric Fano profiles σ(ω) ∝ |qΓ + ω − ω₀|²/(Γ² + (ω−ω₀)²) when discrete narrow resonances interfere with continua. Metamaterial and plasmonic nanoantennas engineer near-dark modes with partial cancellation of radiative damping, yielding sharp peaks exploitable for ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-fano-q-factor-tracks-radiative-darkness-order-parameter"
      ],
      "communication_gap": "Atomic physics curricula emphasize Fano in photoionization; antenna engineers speak S-parameter poles and zeros — vocabulary differs though rational-function shapes coincide.\n",
      "translation_table": [
        {
          "field_a_term": "Fano asymmetry parameter q",
          "field_b_term": "modal interference ratio governing resonance steepness in metamaterial spectra",
          "note": "q controls dip vs peak asymmetry; extracted by fitting measured S-parameters or extinction."
        },
        {
          "field_a_term": "continuum–discrete interference",
          "field_b_term": "radiative branch coupled to trapped quasi-bound modes in structured resonators",
          "note": "“Dark” is leaky in practice; cancellation reduces radiative width."
        },
        {
          "field_a_term": "linewidth Γ = Γ_rad + Γ_abs",
          "field_b_term": "loaded Q = ω₀/Γ after Fano engineering suppresses Γ_rad subset",
          "note": "Absorption remains the ultimate floor at optical frequencies."
        }
      ],
      "references": [
        {
          "doi": "10.1038/nmat2696",
          "note": "Luk'yanchuk et al. (2010) — Fano resonance in plasmonic nanostructures (Nat. Mater.)."
        },
        {
          "doi": "10.1103/RevModPhys.82.2257",
          "note": "Miroshnichenko, Flach & Kivshar (2010) — Fano resonances in nanoscale structures (Rev. Mod. Phys.)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/electromagnetism-metamaterials/b-fano-asymmetric-lineshape-x-metamaterial-dark-mode-quality-factor.yaml"
    },
    {
      "id": "b-floquet-metamaterials-x-nonreciprocal-wave-mixing",
      "title": "Space-time modulated metamaterials use Floquet sideband coupling to implement effective nonreciprocal wave transport without static magnetic bias.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Periodic temporal modulation in metasurfaces couples harmonics asymmetrically in momentum-frequency space, enabling direction-dependent conversion and isolation-like behavior. This bridges Floquet operator theory and RF nonreciprocal component design under practical passivity and loss constraints.\n",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-floquet-metasurface-achieves-isolation-without-magnets-under-passive-bias"
      ],
      "communication_gap": "Floquet analyses often report idealized mode coupling, whereas engineering reports emphasize insertion loss and linearity; unified passivity-aware benchmarks remain immature.\n",
      "translation_table": [
        {
          "field_a_term": "Floquet harmonic coupling matrix",
          "field_b_term": "sideband conversion network for isolator-like response",
          "note": "Nonreciprocal transfer appears through asymmetric mode conversion pathways."
        },
        {
          "field_a_term": "modulation phase velocity and momentum bias",
          "field_b_term": "synthetic traveling-wave bias replacing ferrite magnets",
          "note": "Bias is programmable through modulation waveform parameters."
        },
        {
          "field_a_term": "quasienergy band engineering",
          "field_b_term": "frequency conversion selectivity and insertion-loss tradeoff",
          "note": "Practical isolation depends on balancing conversion efficiency with dissipative loss."
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1210713",
          "note": "Yu et al. (2011), phase-discontinuity metasurface control; foundational for engineered wavefront biasing."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/electromagnetism-metamaterials/b-floquet-metamaterials-x-nonreciprocal-wave-mixing.yaml"
    },
    {
      "id": "b-floquet-time-modulated-metamaterial-x-nonreciprocal-electromagnetic-response",
      "title": "Periodically time-modulated electromagnetic parameters break time-reversal symmetry by Floquet engineering — enabling magnet-free nonreciprocal isolation and asymmetric dispersion without relying on helical meta-atoms or static magnetic bias (temporal metamaterials ↔ RF isolation).\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Switching or parametrically pumping effective capacitance/inductance with frequency Ω introduces Floquet sidebands coupling counterpropagating modes asymmetrically — realized in staggered commutated transmission lines and photonic implementations of magnetic-free isolators. Unlike non-helical Turing",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-staggered-commutation-frequency-threshold-for-target-isolation-db"
      ],
      "communication_gap": "Photonics papers emphasize isolation ratios at λ; microwave commutated lines emphasize switch Ron/Coff — unified benchmarking vocabulary sparse across bands.\n",
      "translation_table": [
        {
          "field_a_term": "Floquet harmonic ladder ω + nΩ",
          "field_b_term": "asymmetric scattering / isolation bandwidth around carrier",
          "note": "Requires modulation strength above threshold; insertion loss trades exist."
        },
        {
          "field_a_term": "time-reversal symmetry breaking via external phase reference",
          "field_b_term": "reciprocity violation measured by S₁₂ ≠ S₂₁ in linearized descriptions",
          "note": "Linear time-invariant analysis insufficient — Floquet matrices replace S-parameters."
        },
        {
          "field_a_term": "modulation depth / duty cycle",
          "field_b_term": "isolation ratio versus insertion loss trade curve",
          "note": "Analogous to pump power in Josephson traveling-wave amplifiers conceptually (different physics layer)."
        }
      ],
      "references": [
        {
          "doi": "10.1038/s41566-017-0051-x",
          "note": "Galiffi et al. (2017) — photonics connected through time (Nat. Photon.; nonreciprocal responses via temporal modulation)."
        },
        {
          "doi": "10.1038/ncomms11217",
          "note": "Doerr et al. (2016) — magnetic-free non-reciprocity via staggered commutation (Nat. Commun.)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/electromagnetism-metamaterials/b-floquet-time-modulated-metamaterial-x-nonreciprocal-electromagnetic-response.yaml"
    },
    {
      "id": "b-nonhelical-turing-electromagnetic",
      "title": "Non-helical cavity resonators ↔ Turing-like electromagnetic pattern formation (metamaterial morphogenesis)\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Arrays of non-helical (meander, bifilar, or space-filling) resonators inside shielded metal cavities may exhibit spatial organization of high-Q electromagnetic modes that can be formally mapped onto activator–inhibitor dynamics akin to Turing reaction–diffusion instability: short-range “activation” ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-nonhelical-turing-cloaking-adaptation"
      ],
      "communication_gap": "Slow-wave and metamaterial engineering typically assumes designed periodicity; developmental biology has mature Turing-instability theory in chemistry. The electromagnetic analogue—mapping field energy and phase (not concentration) to reaction–diffusion parameters—is rarely formalized in one framework, so specialists in metamaterials and pattern formation seldom share vocabulary or stability-analysis tools.\n",
      "translation_table": [
        {
          "field_a_term": "Activator diffusivity D_u (Turing)",
          "field_b_term": "Near-field coupling strength / localization of resonant modes",
          "note": "Short-range reinforcement between neighboring resonators at resonance (analogy)."
        },
        {
          "field_a_term": "Inhibitor diffusivity D_v (Turing)",
          "field_b_term": "Evanescent leakage, radiative damping, cavity-wall boundary phase shift",
          "note": "Long-range suppression of coherence across the array (analogy)."
        },
        {
          "field_a_term": "Instability threshold (ratio of effective transport scales)",
          "field_b_term": "Condition for spontaneous spatial modulation of mode amplitude or phase",
          "note": "Requires linear stability analysis of the coupled Maxwell + resonator circuit model."
        },
        {
          "field_a_term": "Characteristic pattern wavelength Lambda*",
          "field_b_term": "Spatial period of high-Q mode clusters or band-structure features",
          "note": "Would be predicted from geometry, loss, and coupling after a validated instability calculation."
        },
        {
          "field_a_term": "Stripe vs spot selection (Turing)",
          "field_b_term": "Array geometry, cavity aspect ratio, nonlinear saturation / bistability",
          "note": "Would determine morphology if an instability exists."
        }
      ],
      "references": [
        {
          "doi": "10.1098/rstb.1952.0012",
          "note": "Turing (1952) — morphogenesis via reaction–diffusion (conceptual anchor for the analogy)."
        },
        {
          "note": "No dedicated experimental literature yet identifies Turing-like pattern formation specifically in non-helical cavity resonator arrays; Kerr cavity dynamics (e.g. Lugiato–Lefever class) is a separate but related non-equilibrium pattern-formation family in optics.\n"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/electromagnetism-metamaterials/b-nonhelical-turing-electromagnetic.yaml"
    },
    {
      "id": "b-biomimetic-robotics-locomotion",
      "title": "Biological locomotion principles — spring-loaded inverted pendulum (SLIP) for running, Lighthill elongated-body theory for swimming, and leading-edge vortex dynamics for flapping flight — provide quantitative engineering templates for legged, undulatory, and aerial robots, unifying evolutionary optimization with mechanical design.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Biological locomotion has been refined over hundreds of millions of years of evolution and can be described by precise physical models that engineers can implement directly. Running (cockroach, horse, human): the spring-loaded inverted pendulum (SLIP) model reduces all running to a single spring (st",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-biomimetic-slip-locomotion-minimal-energy-cost-robots"
      ],
      "communication_gap": "Biomechanics publishes in J Exp Biology, J R Soc Interface, and Biology Letters; robotics publishes in ICRA, IROS, and IEEE Transactions. The overlap is present but limited: most roboticists re-derive biological principles independently (or approximate them crudely) because they do not read biology literature. Conversely, biologists rarely frame locomotion principles in control-theoretic language accessible to engineers. The \"bio-inspired robotics\" conferences are improving this but remain niche.\n",
      "translation_table": [
        {
          "field_a_term": "leg spring stiffness k in SLIP model",
          "field_b_term": "robotic leg spring constant in compliant running machines",
          "note": "Animals tune k via muscle co-contraction; robots use physical springs or series-elastic actuators"
        },
        {
          "field_a_term": "Strouhal number St = fA/U (swimming efficiency optimum)",
          "field_b_term": "tail-beat frequency and amplitude control in underwater robots",
          "note": "Fish self-tune to St ~ 0.3; AUV control loops should enforce same constraint"
        },
        {
          "field_a_term": "leading-edge vortex (LEV) in insect flight",
          "field_b_term": "micro-air-vehicle wing geometry for high-lift at low Reynolds number",
          "note": "LEV is stable only for Re 100–10000; MAV design must target this regime"
        },
        {
          "field_a_term": "muscle fiber pennation angle and compliance",
          "field_b_term": "pneumatic soft actuator fiber winding angle",
          "note": "McKibben actuator force-length curve mirrors pennate muscle geometry"
        },
        {
          "field_a_term": "metabolic cost of transport (COT)",
          "field_b_term": "specific energy consumption of legged robots",
          "note": "SLIP mechanics enables COT < 1 J/(N·m); many robots achieve COT > 10"
        }
      ],
      "references": [
        {
          "note": "Full & Koditschek (1999) Templates and anchors: neuromechanical hypotheses of legged locomotion. J Exp Biol 202:3325"
        },
        {
          "doi": "10.1098/rspb.1971.0085",
          "note": "Lighthill (1971) Large-amplitude elongated-body theory of fish locomotion. Proc R Soc B 179:125"
        },
        {
          "doi": "10.1126/science.288.5463.100",
          "note": "Dickinson et al. (2000) How animals move: an integrative view. Science 288:100"
        },
        {
          "note": "Raibert (1986) Legged Robots That Balance. MIT Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/engineering-biology/b-biomimetic-robotics-locomotion.yaml"
    },
    {
      "id": "b-droplet-splitting-microfluidics-x-binary-fission-metaphor",
      "title": "Microfluidic droplet generators split aqueous plugs into daughter droplets at T-junctions or flow-focusing nozzles — an engineering control problem whose discrete daughter-size statistics loosely resemble binary branching metaphors used for cell division, **without** implying shared molecular biology or conserved scaling exponents.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Capillary instability and pressure-flow balances set deterministic or stochastic splitting ratios in microchannels (often modeled as pinch-off dynamics with noise); binary cell fission likewise partitions biomatter but is governed by cytoskeletal mechanics, checkpoints, and regulatory networks. The ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-droplet-split-binomial-partition-fission-alignment"
      ],
      "communication_gap": "Microfluidics labs publish deterministic scaling laws while organismal biology emphasizes molecular pathways — workshops rarely compare empirical splitting distributions using identical branching-process estimators even when datasets look superficially similar.\n",
      "translation_table": [
        {
          "field_a_term": "daughter droplet volume partition at symmetric T-junction",
          "field_b_term": "daughter cell size asymmetry after cytokinesis",
          "note": "Metaphor only — fluid pinch-off physics differs from contractile ring biochemistry."
        },
        {
          "field_a_term": "flow-rate ratio Q_left / Q_total controlling split probability",
          "field_b_term": "metabolic / checkpoint asymmetry influencing division sizes",
          "note": "Analogous control knobs exist formally as inputs to stochastic maps; mechanisms unrelated."
        },
        {
          "field_a_term": "polydispersity from noisy splitting cascades",
          "field_b_term": "variability in lineage tree branch lengths / generation times",
          "note": "Branching-process mathematics may describe both datasets — domain transfer requires empirical fits."
        }
      ],
      "references": [
        {
          "doi": "10.1063/1.1537519",
          "note": "Anna, Bontoux & Stone (2003) — formation of dispersions using “flow focusing” in microchannels; Appl Phys Lett."
        },
        {
          "doi": "10.1038/nature05058",
          "note": "Whitesides (2006) — the origins and the future of microfluidics; Nature."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/engineering-biology/b-droplet-splitting-microfluidics-x-binary-fission-metaphor.yaml"
    },
    {
      "id": "b-engineering-reliability-extreme-value",
      "title": "Extreme value theory (Gumbel/Weibull distributions) governs infrastructure failure, biological aging mortality, and material fatigue through the same mathematical framework of order statistics, making actuarial, structural, and materials reliability engineering mathematically unified.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Extreme value theory (EVT) asks: given N independent random variables (component strengths, lifespans, load magnitudes), what is the distribution of the maximum or minimum? The Fisher-Tippett-Gnedenko theorem proves that the limiting distribution belongs to one of three families regardless of the or",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Structural engineers use Weibull analysis daily but rarely know Gompertz mortality. Actuaries use Gompertz tables but do not connect them to materials reliability. Biologists studying aging know Gompertz empirically but do not recognize it as extreme value theory. The mathematical unification (EVT as the common framework) is known to statisticians (Gumbel, 1958; de Haan & Ferreira) but has not been made accessible or useful to practitioners in any of the three applied fields simultaneously. The bridge requires only the recognition that \"weakest-link\" is a universal failure model, not a field-specific assumption.\n",
      "translation_table": [
        {
          "field_a_term": "Weibull distribution of component lifetime (engineering)",
          "field_b_term": "Gompertz mortality law (biology/actuarial)",
          "note": "Both are extreme value (Gumbel) distributions of first-failure time"
        },
        {
          "field_a_term": "Weibull shape parameter β (reliability engineering)",
          "field_b_term": "Gompertz rate parameter b (aging biology)",
          "note": "Both describe the steepness of hazard rate increase with time"
        },
        {
          "field_a_term": "Weakest-link model (structural mechanics)",
          "field_b_term": "Competing risks model (survival analysis)"
        },
        {
          "field_a_term": "Fatigue life scatter (Weibull modulus)",
          "field_b_term": "Lifespan variation across individuals (mortality scatter)"
        },
        {
          "field_a_term": "Size effect (larger specimens fail earlier)",
          "field_b_term": "Population size effect on maximum lifespan (larger populations have older max ages)"
        },
        {
          "field_a_term": "Burn-in period (infant mortality, β < 1)",
          "field_b_term": "Childhood/juvenile mortality peak"
        },
        {
          "field_a_term": "Design life (characteristic lifetime at chosen reliability level)",
          "field_b_term": "Life expectancy (median/mean lifetime at population level)"
        }
      ],
      "references": [
        {
          "doi": "10.1017/CBO9780511755019",
          "note": "de Haan & Ferreira (2006) - Extreme Value Theory: An Introduction; mathematical unification of EVT families"
        },
        {
          "doi": "10.1098/rspb.1825.0039",
          "note": "Gompertz (1825) - On the nature of the function expressive of the law of human mortality"
        },
        {
          "doi": "10.1080/01621459.1951.10500793",
          "note": "Weibull (1951) - A statistical distribution function of wide applicability; engineering reliability foundation"
        },
        {
          "doi": "10.1093/biomet/40.1-2.12",
          "note": "Gumbel (1953) - Statistical theory of extreme values and some practical applications"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/engineering-biology/b-engineering-reliability-extreme-value.yaml"
    },
    {
      "id": "b-feedback-control-homeostasis",
      "title": "Feedback control theory and biological homeostasis — integral feedback is the mathematical mechanism guaranteeing perfect adaptation in both engineered PID controllers and glucose regulation",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Biological homeostasis (blood glucose, body temperature, pH) implements integral feedback control — mathematically identical to the I term of a PID controller. The integral action guarantees zero steady-state error regardless of disturbance magnitude or type. Yi et al. (2000) proved that perfect ada",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-integral-feedback-sufficient-perfect-adaptation-living-cells"
      ],
      "communication_gap": "Control engineers and biologists rarely collaborate despite deep mathematical connections. Yi et al. (2000) is a landmark paper cited across both fields, but control-theoretic frameworks remain peripheral in most physiology curricula. Medical education teaches homeostasis descriptively without feedback control formalism. The synthetic biology community is the main bridge (Elowitz, Leibler, Khalil & Collins), but pharmacology has largely not adopted control-theoretic drug dosing optimization.\n",
      "translation_table": [
        {
          "field_a_term": "integral gain K_I in PID controller",
          "field_b_term": "transcriptional integration of error signal",
          "note": "Protein accumulation (mRNA, signaling molecules) integrates error over time"
        },
        {
          "field_a_term": "setpoint (reference signal r(t))",
          "field_b_term": "physiological reference level (e.g., 5 mM blood glucose)",
          "note": "Encoded in receptor binding constants and enzyme Km values"
        },
        {
          "field_a_term": "error signal e(t) = r(t) - y(t)",
          "field_b_term": "deviation from homeostatic set point",
          "note": "Sensed by receptor occupancy; drives downstream effector response"
        },
        {
          "field_a_term": "actuator (control signal u(t))",
          "field_b_term": "effector organ (beta cells, kidney tubules, sweat glands)",
          "note": "Converts control signal into physical correction of regulated variable"
        },
        {
          "field_a_term": "steady-state error (non-zero in P-only control)",
          "field_b_term": "offset from setpoint in proportional-only regulation",
          "note": "Integral action eliminates steady-state error; essential for exact homeostasis"
        },
        {
          "field_a_term": "actuator saturation (control signal limits)",
          "field_b_term": "insulin resistance (T2D: same insulin signal, less effect)",
          "note": "Saturation causes steady-state error even with integral action — T2D mechanism"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.97.9.4649",
          "note": "Yi et al. (2000) PNAS 97:4649 — perfect adaptation requires integral feedback"
        },
        {
          "doi": "10.1016/j.cell.2004.08.012",
          "note": "Stelling et al. (2004) Cell 118:675 — robustness of cellular functions"
        },
        {
          "note": "Aström & Murray (2008) Feedback Systems (Princeton UP, ISBN 0691135762) — textbook connecting control theory and biology"
        },
        {
          "doi": "10.1006/jtbi.2001.2383",
          "note": "El-Samad et al. (2002) J Theor Biol 214:17 — control-theoretic analysis of temperature compensation"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/engineering-biology/b-feedback-control-homeostasis.yaml"
    },
    {
      "id": "b-organ-on-chip-microfluidics",
      "title": "Organ-on-a-chip devices are microfluidic bioreactors that recapitulate organ physiology through laminar flow and mechanical actuation — bridging MEMS engineering to cell biology and replacing animal models in drug testing",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Organ-on-a-chip (OoC) technology bridges microfluidic engineering to organ-level physiology. At the microscale (10-1000 μm channels), Reynolds number Re = ρvL/μ << 1 ensures laminar flow — providing precise, controllable shear stress on lining cells. The Péclet number Pe = vL/D >> 1 maintains concen",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-organ-chip-multi-organ-body-on-chip-systemic-toxicity"
      ],
      "communication_gap": "Microfluidics engineers publish in Lab on a Chip and Biomicrofluidics; cell biologists publish in Nature Cell Biology and Cell. The regulatory pathway for OoC data (FDA Modernization Act 2.0) is not yet widely known in the biology community. Most OoC devices are built by engineers and then tested with cells as an afterthought, rather than being co-designed with biologists who understand the physiological endpoints.\n",
      "translation_table": [
        {
          "field_a_term": "Reynolds number Re (fluid mechanics)",
          "field_b_term": "shear stress on endothelial cells (vascular biology)",
          "note": "Low Re ensures laminar flow; the resulting wall shear stress is the mechanotransduction signal cells sense"
        },
        {
          "field_a_term": "Péclet number Pe (mass transfer engineering)",
          "field_b_term": "morphogen gradient (developmental biology)",
          "note": "High Pe maintains steep concentration gradients that guide cell differentiation — as in embryonic patterning"
        },
        {
          "field_a_term": "PDMS membrane cyclic strain (MEMS engineering)",
          "field_b_term": "alveolar breathing mechanics (pulmonary physiology)",
          "note": "The mechanical stretching signal drives lung surfactant regulation and immune cell recruitment"
        },
        {
          "field_a_term": "microfluidic flow control (engineering)",
          "field_b_term": "peristalsis simulation (gastroenterology)",
          "note": "Programmable pumping replicates intestinal peristaltic contractions in gut-on-a-chip devices"
        },
        {
          "field_a_term": "surface functionalization / extracellular matrix coating (bioengineering)",
          "field_b_term": "cell adhesion and tissue architecture (cell biology)",
          "note": "Engineering surface chemistry determines which cell types adhere and how tissues self-organize"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1188302",
          "note": "Huh et al. (2010). Reconstituting organ-level lung functions on a chip. Science 328:1662."
        },
        {
          "doi": "10.1038/nature05058",
          "note": "Whitesides (2006). The origins and the future of microfluidics. Nature 442:368."
        },
        {
          "doi": "10.1038/nbt.2989",
          "note": "Bhatia & Ingber (2014). Microfluidic organs-on-chips. Nat Biotechnol 32:760."
        },
        {
          "doi": "10.1038/s41596-020-0329-z",
          "note": "Marx et al. (2020). Biology-inspired microphysiological systems to advance patient benefit and animal welfare in drug development. Nat Protoc 15:2049."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/engineering-biology/b-organ-on-chip-microfluidics.yaml"
    },
    {
      "id": "b-prosthetic-limbs-sensorimotor",
      "title": "Prosthetic Limbs and Sensorimotor Integration — myoelectric control, osseointegration, targeted muscle reinnervation, and bidirectional neural interfaces reconnect the motor system after amputation",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Modern prosthetic limbs span mechanical, electronic, and neural engineering. Myoelectric control uses surface electromyography (sEMG) signals from residual limb muscles: electrodes detect motor unit action potentials (20–500 Hz), amplitude- modulated signals drive proportional control of prosthetic ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Prosthetics research spans mechanical engineering, biomechanics, neuroscience, surgery, and rehabilitation medicine — rarely integrated in a single research group. Commercial prosthetics companies (Ottobock, Össur, Touch Bionics) develop devices largely independently of academic neural interface research. Clinical adoption of neural interfaces remains limited by regulatory pathways, implant longevity, and the gap between laboratory demonstrations and robust clinical performance.\n",
      "translation_table": [
        {
          "field_a_term": "surface EMG (sEMG) pattern recognition",
          "field_b_term": "motor intent decoded from residual limb muscle activation patterns",
          "note": "EMG is the electrical signature of motor unit recruitment; ML classifies patterns to grip types"
        },
        {
          "field_a_term": "osseointegration (titanium–bone interface)",
          "field_b_term": "direct skeletal attachment bypassing socket interface",
          "note": "Brånemark implants achieve >95% 5-year survival in amputees; eliminate pistoning and skin breakdown"
        },
        {
          "field_a_term": "targeted muscle reinnervation (TMR)",
          "field_b_term": "surgical nerve-to-muscle rerouting to amplify prosthetic control signals",
          "note": "Each redirected nerve creates a new EMG site; high-level amputees gain proportional control of multiple joints"
        },
        {
          "field_a_term": "transversal intrafascicular electrode (TIME)",
          "field_b_term": "peripheral nerve electrode for bidirectional sensory-motor interface",
          "note": "Intraneural electrodes access individual fascicles; enables graded tactile feedback at 0.1–10 mA currents"
        },
        {
          "field_a_term": "proprioceptive feedback (joint angle, force)",
          "field_b_term": "closed-loop sensorimotor control of prosthetic joint position",
          "note": "The absent afferent signal is the critical gap; vibrotactile substitution and peripheral nerve stimulation are partial solutions"
        },
        {
          "field_a_term": "regenerative peripheral nerve interface (RPNI)",
          "field_b_term": "bioamplifier: free muscle graft wrapped around nerve stump",
          "note": "RPNI amplifies residual nerve signals ~20× without external electronics; recently shown to reduce phantom limb pain"
        }
      ],
      "references": [
        {
          "note": "Brånemark (1969) Scand J Plast Reconstr Surg 3:81 — osseointegration principle"
        },
        {
          "doi": "10.1126/scitranslmed.3006820",
          "note": "Raspopovic et al. (2014) Sci Transl Med 6:222ra19 — bidirectional peripheral nerve interface"
        },
        {
          "note": "Atzori & Müller (2015) J NeuroEng Rehab 12:38 — EMG pattern recognition review"
        },
        {
          "note": "Resnik et al. (2018) J Rehab Res Dev 49:321 — TMR clinical outcomes"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/engineering-biology/b-prosthetic-limbs-sensorimotor.yaml"
    },
    {
      "id": "b-robustness-evolvability-modularity",
      "title": "The robustness-evolvability trade-off in engineering (rigid vs. adaptable design) maps onto canalization vs. evolvability in evolution (Waddington 1942, Kirschner & Gerhart 1998), and both fields solve it through near-decomposable modular architecture (Simon 1962).\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "In engineering, two fundamental design objectives conflict:\n- ROBUSTNESS -- Resistance to perturbations (noise, damage, parameter\n  variation). Achieved by over-engineering, redundancy, tight tolerances.\n- EVOLVABILITY -- Ease of modification for new functionality. Achieved by\n  loose coupling, modu",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-modular-architecture-robustness-evolvability",
        "h-gompertz-weibull-aging-unification"
      ],
      "communication_gap": "Waddington (1942) worked in developmental biology; Simon (1962) worked in cognitive science and organisational theory; Kirschner & Gerhart (1998) worked in cell biology; Wagner (2005) in evolutionary theory. Engineering robustness literature (FMEA, reliability engineering) is largely inaccessible to biologists. The mathematical tools are similar (sensitivity analysis, modularity metrics) but the vocabulary differs completely. The explicit connection between near-decomposability as an engineering design principle and modular developmental organisation as an evolutionary solution has been made by Wagner and Kirschner independently but has not produced cross-disciplinary tool-sharing.\n",
      "translation_table": [
        {
          "field_a_term": "Mechanical robustness (resistance to stress-strain perturbation)",
          "field_b_term": "Genetic canalization (buffering of mutations by developmental systems)",
          "note": "Both measure resistance of system output to input perturbation"
        },
        {
          "field_a_term": "Sensitivity analysis ∂y/∂x (engineering)",
          "field_b_term": "Epistasis coefficient ε_{ij} in gene regulatory networks",
          "note": "Quantitative measure of coupling between system components"
        },
        {
          "field_a_term": "Near-decomposable architecture (Simon 1962)",
          "field_b_term": "Modular gene regulatory networks and developmental compartments",
          "note": "Both separate fast within-module dynamics from slow between-module coupling"
        },
        {
          "field_a_term": "Failure mode analysis / FMEA",
          "field_b_term": "Phenotypic plasticity limits and developmental constraints",
          "note": "Both map which perturbations exceed the system's buffering capacity"
        },
        {
          "field_a_term": "Design-for-manufacturability (DFM) — standardised interfaces",
          "field_b_term": "Conserved core developmental processes with variable periphery",
          "note": "Kirschner & Gerhart: conserved core = stable interface; variable periphery = evolvable module"
        },
        {
          "field_a_term": "Robust-yet-fragile property (tightly optimised systems fail catastrophically at edges)",
          "field_b_term": "Canalized organisms brittle under novel stressors (Hsp90 capacitor mechanism)",
          "note": "Rutherford & Lindquist (1998): heat shock releases cryptic variation"
        },
        {
          "field_a_term": "Modular redesign (swap module without system-wide cascade)",
          "field_b_term": "Exon shuffling / domain swapping in protein evolution",
          "note": "Modular protein architecture enables evolutionary innovation by recombination"
        }
      ],
      "references": [
        {
          "doi": "10.1038/150563a0",
          "note": "Waddington (1942) Nature 150:563 — canalization and the epigenetic landscape"
        },
        {
          "doi": "10.1073/pnas.95.15.8420",
          "note": "Kirschner & Gerhart (1998) PNAS 95:8420 — evolvability as facilitated variation"
        },
        {
          "note": "Simon (1962) Proc Am Phil Soc 106:467 — architecture of complexity; near-decomposability"
        },
        {
          "note": "Wagner (2005) Robustness and Evolvability in Living Systems (Princeton University Press)"
        },
        {
          "doi": "10.1038/38593",
          "note": "Rutherford & Lindquist (1998) Nature 396:336 — Hsp90 as capacitor for morphological evolution"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/engineering-biology/b-robustness-evolvability-modularity.yaml"
    },
    {
      "id": "b-swarm-robotics-stigmergy",
      "title": "Swarm-robotic path optimisation via pheromone-inspired digital trails is formally equivalent to ant-colony stigmergy: both systems converge to shortest paths through positive feedback on good solutions and evaporation of poor ones, described by the same differential equations governing ant trail-pheromone dynamics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "In ant colonies, workers deposit pheromone on return from food sources; shorter trails accumulate pheromone faster (more round trips per unit time), attracting more ants until the colony commits to the shortest path. Dorigo's Ant Colony Optimisation (ACO, 1992) encodes this in discrete graphs: edge ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-swarm-pheromone-convergence-rate"
      ],
      "communication_gap": "Entomologists studying ant trail formation and computer scientists developing metaheuristic optimisation publish in separate venues (Insectes Sociaux vs. IEEE TAES / Operations Research). The formal mathematical equivalence between biological pheromone dynamics and ACO update rules was stated by Dorigo 1992 but is rarely cited in the biological literature.\n",
      "translation_table": [
        {
          "field_a_term": "Ant pheromone trail on path segment",
          "field_b_term": "Digital pheromone field on graph edge (swarm robot)",
          "note": "Both accumulate proportional to usage frequency; evaporate at rate ρ"
        },
        {
          "field_a_term": "Ant path-selection probability ∝ τ^α × η^β",
          "field_b_term": "Robot next-hop probability in ACO routing (same formula)",
          "note": "α, β weight pheromone vs. heuristic information; tunable in both systems"
        },
        {
          "field_a_term": "Pheromone evaporation rate ρ",
          "field_b_term": "Forgetting / stale-route decay parameter in digital systems",
          "note": "Evaporation prevents premature convergence to suboptimal solutions in both domains"
        },
        {
          "field_a_term": "Stigmergic self-organisation (no central control)",
          "field_b_term": "Distributed swarm-robot coordination via shared environment",
          "note": "Both systems encode solutions in the environment rather than in agent memory"
        }
      ],
      "references": [
        {
          "doi": "10.1142/S0218213006002813",
          "note": "Dorigo & Stutzle (2002) – Ant Colony Optimization: algorithmic overview and convergence proofs"
        },
        {
          "doi": "10.1073/pnas.89.12.5979",
          "note": "Goss et al. (1989) PNAS – ant trail selection experiment demonstrating shortest-path emergence"
        },
        {
          "doi": "10.1038/s41598-017-11585-9",
          "note": "Perna & Latty (2014) – transport networks in biology and ACO: formal comparison"
        },
        {
          "arxiv": "1010.4982",
          "note": "Camazine et al. – Self-Organisation in Biological Systems: stigmergy chapter"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/engineering-biology/b-swarm-robotics-stigmergy.yaml"
    },
    {
      "id": "b-synthetic-biology-genetic-circuits",
      "title": "Synthetic biology applies electronic circuit design principles to genetic systems — using transcription factors as NOT/AND/NOR gates, implementing the repressilator (genetic ring oscillator) and toggle switch (genetic flip-flop), and employing transfer functions and Bode plots from control theory to engineer programmable living systems.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Elowitz & Leibler (2000) and Gardner et al. (2000) — published simultaneously in Nature — demonstrated that gene regulatory networks can be engineered to implement electronic circuit functions. The repressilator (Elowitz & Leibler) is a synthetic genetic ring oscillator: three genes (tetR, lacI, cI)",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-biological-computing-turing-completeness"
      ],
      "communication_gap": "Electrical engineering and control theory training is not part of standard biology PhD programs. The formalisms of transfer functions and Bode plots appear in the synthetic biology literature but are rarely derived from first principles for biological audiences. Conversely, most electrical engineers are unaware that their circuit formalism has been implemented in DNA. The iGEM (International Genetically Engineered Machine) competition has done more than any academic program to bridge this gap through engineering-focused biological design education.\n",
      "translation_table": [
        {
          "field_a_term": "NOT gate (inverter)",
          "field_b_term": "transcriptional repressor (e.g. TetR represses P_tet promoter)",
          "note": "protein A represses gene B = A is a NOT gate for B"
        },
        {
          "field_a_term": "AND gate",
          "field_b_term": "split intein / split protein requiring both fragments for activity",
          "note": "output only when both inputs present simultaneously"
        },
        {
          "field_a_term": "ring oscillator (3-stage NOT chain)",
          "field_b_term": "repressilator (3-gene mutual repression cycle)",
          "note": "Elowitz & Leibler (2000); period set by protein degradation rates"
        },
        {
          "field_a_term": "flip-flop / bistable latch",
          "field_b_term": "genetic toggle switch (mutual repression bistability)",
          "note": "Gardner et al. (2000); two stable states encode binary memory"
        },
        {
          "field_a_term": "component library (standardized electronic parts)",
          "field_b_term": "BioBrick registry (standardized genetic parts with defined interfaces)"
        },
        {
          "field_a_term": "transfer function (input-output gain and phase)",
          "field_b_term": "dose-response curve of a genetic promoter (Hill function)"
        }
      ],
      "references": [
        {
          "doi": "10.1038/35002125",
          "note": "Elowitz & Leibler (2000) — A synthetic oscillatory network of transcriptional regulators (repressilator); Nature 403:335"
        },
        {
          "doi": "10.1038/35002131",
          "note": "Gardner et al. (2000) — Construction of a genetic toggle switch in Escherichia coli; Nature 403:339"
        },
        {
          "doi": "10.1038/msb4100073",
          "note": "Andrianantoandro et al. (2006) — Synthetic biology; new engineering rules for an emerging discipline; Mol Syst Biol 2:2006.0028"
        },
        {
          "doi": "10.1126/science.1252749",
          "note": "Cameron et al. (2014) — Tunable protein degradation in bacteria; Science 346:1258829"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/engineering-biology/b-synthetic-biology-genetic-circuits.yaml"
    },
    {
      "id": "b-tensegrity-cytoskeleton",
      "title": "Buckminster Fuller's tensegrity (tensional integrity) structures — where compression members float in a continuous tension network — are the mechanical principle governing cytoskeletal architecture; actin filaments (tension) and microtubules (compression) form a biological tensegrity network predicting cell stiffness, shape change, and mechanotransduction.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Fuller (1961) defined tensegrity as a structural principle where isolated compression members (\"struts\") are suspended in a continuous network of tension members (\"cables\"). The structure is globally stable without any compression member touching another — stability arises from the interplay of tens",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-tensegrity-cancer-mechanics"
      ],
      "communication_gap": "Fuller worked in architecture and engineering; his tensegrity structures were known in the art and architecture communities from the 1960s but not in cell biology. Ingber's 1993 Scientific American article made the connection accessible but the engineering tensegrity literature remains largely unknown to cell biologists. Cell mechanics researchers use continuum mechanics frameworks (cortical shell models, soft glassy rheology) and are not familiar with the structural engineering of geodesic domes. The vocabulary gap (\"prestress\" vs. \"cortical tension\", \"compliance\" vs. \"creep\") maintains the separation despite the mathematical identity.\n",
      "translation_table": [
        {
          "field_a_term": "Struts (compression members in tensegrity)",
          "field_b_term": "Microtubules (compression-bearing elements in cytoskeleton)",
          "note": "Both carry compressive loads; both are discrete members surrounded by continuous tension network"
        },
        {
          "field_a_term": "Cables (tension members in tensegrity)",
          "field_b_term": "Actin filaments (tension-carrying pre-stressed cables)",
          "note": "Both are continuously pre-stressed in tension; primary load-bearing network"
        },
        {
          "field_a_term": "Ground anchors (tensegrity attachment points)",
          "field_b_term": "Focal adhesions (ECM attachment complexes)",
          "note": "Both provide external anchorage that transmits forces from the global structure"
        },
        {
          "field_a_term": "Prestress (initial tension in cables)",
          "field_b_term": "Actin cortex tension (myosin-II-generated cortical tension)",
          "note": "Both determine structural stiffness independently of material properties"
        },
        {
          "field_a_term": "Global structural stiffness (scales with prestress)",
          "field_b_term": "Cell Young's modulus (measured by AFM indentation)",
          "note": "Prediction: E ∝ cortical tension; confirmed by blebbistatin/cytochalasin experiments"
        },
        {
          "field_a_term": "Force transmission through strut network",
          "field_b_term": "Mechanotransduction (force propagation to nucleus)",
          "note": "Tensegrity predicts long-range force transmission; confirmed by nuclear distortion experiments"
        }
      ],
      "references": [
        {
          "note": "Fuller (1961) Portfolio and Art News Annual 4:112 — tensegrity definition"
        },
        {
          "note": "Ingber (1993) Sci Am 268(1):48 — cellular tensegrity hypothesis"
        },
        {
          "doi": "10.1126/science.7684161",
          "note": "Wang, Butler & Ingber (1993) Science 260:1124 — mechanical coupling via integrins"
        },
        {
          "doi": "10.1007/s10237-002-0015-y",
          "note": "Stamenović & Ingber (2002) Biomech Model Mechanobiol 1:95 — tensegrity and cell deformability"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/engineering-biology/b-tensegrity-cytoskeleton.yaml"
    },
    {
      "id": "b-electron-microscopy-materials-characterization",
      "title": "Transmission electron microscopy — exploiting the de Broglie wavelength of electrons (λ ≈ 2.5 pm at 200 kV, 100× shorter than visible light) to diffract from atomic planes and form phase-contrast images resolving individual atomic columns at 50 pm — bridges quantum mechanics of electron-matter interaction to materials and biological structure determination, culminating in cryo-EM resolving protein structures at 1.2 Å (Nobel Chemistry 2017).\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Transmission electron microscopy (TEM) exploits the quantum mechanical wave nature of electrons. The de Broglie wavelength of electrons accelerated through voltage V is λ = h/√(2meV) ≈ 2.51 pm at 200 kV (relativistic correction reduces this to ~1.97 pm). This is 100,000× shorter than visible light, ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-cryo-em-membrane-protein-structures-without-detergent-native-lipid-bilayer"
      ],
      "communication_gap": "Electron microscopy is taught in materials science departments (HRTEM, STEM for materials) and structural biology departments (cryo-EM for proteins) with minimal cross-pollination. The quantum mechanical theory of image formation (multislice simulation, Bloch wave theory) is known to instrument scientists but rarely taught in biology or even materials engineering programs. The Nobel Prize for cryo-EM (2017) brought the technique to wide attention in biology, but the underlying quantum optics and electron-matter interaction physics remains invisible to most users of the technique.\n",
      "translation_table": [
        {
          "field_a_term": "de Broglie wavelength λ = h/√(2meV) (quantum mechanics)",
          "field_b_term": "Ultimate resolution limit of the electron microscope at given voltage",
          "note": "λ ≈ 2 pm at 200 kV — resolution is lens-limited (Cs) not wavelength-limited"
        },
        {
          "field_a_term": "Bragg diffraction nλ = 2d sinθ (crystallography)",
          "field_b_term": "Selected area electron diffraction (SAED) pattern for crystal structure",
          "note": "Diffraction spots give d-spacing and crystal symmetry; used for phase identification"
        },
        {
          "field_a_term": "Spherical aberration Cs (electron optics)",
          "field_b_term": "Resolution-limiting lens imperfection corrected by aberration correctors",
          "note": "Aberration correction reduced Cs from mm to μm range, enabling 50 pm HRTEM"
        },
        {
          "field_a_term": "HAADF intensity ∝ Z^1.7 (quantum scattering)",
          "field_b_term": "Atomic number contrast for direct heavy-atom identification",
          "note": "Z-contrast imaging identifies catalyst nanoparticle composition at single-atom level"
        },
        {
          "field_a_term": "Characteristic X-ray emission (inner-shell ionisation)",
          "field_b_term": "EDS elemental mapping at nm-scale spatial resolution",
          "note": "Single-atom EDS sensitivity achieved; maps dopant distribution in semiconductor devices"
        },
        {
          "field_a_term": "Vitrification at -196°C (cryogenic sample preparation)",
          "field_b_term": "Radiation damage mitigation enabling biological cryo-EM",
          "note": "Vitreous ice reduces electron dose required per image; enables SNR sufficient for averaging"
        }
      ],
      "references": [
        {
          "note": "Williams & Carter (2009) Transmission Electron Microscopy: A Textbook for Materials Science, 2nd ed., Springer — comprehensive materials TEM text\n"
        },
        {
          "note": "Pennycook & Nellist, eds. (2011) Scanning Transmission Electron Microscopy: Imaging and Analysis, Springer — STEM-HAADF and STEM-EDS methods\n"
        },
        {
          "doi": "10.1126/science.1247675",
          "note": "Kühlbrandt (2014) The resolution revolution, Science 343:1443 — review of the cryo-EM resolution revolution enabled by direct electron detectors\n"
        },
        {
          "doi": "10.1016/j.cell.2015.03.043",
          "note": "Cheng (2015) Single-particle cryo-EM at crystallographic resolution, Cell 161:450 — cryo-EM achieving 2 Å and beyond, rivalling X-ray crystallography\n"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/engineering-chemistry/b-electron-microscopy-materials-characterization.yaml"
    },
    {
      "id": "b-distributed-systems-consensus",
      "title": "The Fischer-Lynch-Paterson impossibility theorem (1985) proves no deterministic consensus algorithm terminates in asynchronous systems with even one failure; Paxos achieves consensus under fail-stop in 2 message rounds; Byzantine fault tolerance requires 3f+1 processes; the CAP theorem limits distributed systems to two of three properties — mathematical theorems with direct engineering consequences for cloud storage, blockchain, and distributed databases.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Fischer-Lynch-Paterson (FLP) impossibility (1985): in an asynchronous system where messages may be delayed arbitrarily and at least one process may fail silently, no deterministic algorithm can guarantee both safety (all processes agree) and liveness (they eventually decide). This is a mathematical ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-cap-theorem-pacelc-extension"
      ],
      "communication_gap": "Distributed systems theory and formal verification research publish in PODC/DISC/FOCS; systems engineering of cloud databases uses different vocabulary (eventual consistency, strong consistency) without always tracing these to the formal theorems. Blockchain developers frequently reinvent consensus mechanisms without awareness of impossibility results from 1985 that constrain their designs. The connection to game theory and Byzantine failures in rational agents is even less well-known.\n",
      "translation_table": [
        {
          "field_a_term": "bivalency argument in FLP",
          "field_b_term": "diagonalization argument in computability theory",
          "note": "both construct situations where no fixed answer can be reached by a deterministic algorithm"
        },
        {
          "field_a_term": "quorum in Paxos (majority 2f+1)",
          "field_b_term": "majority voting with f tolerated failures",
          "note": "quorum intersection property ensures two quorums always share at least one member"
        },
        {
          "field_a_term": "Byzantine generals problem (3f+1 bound)",
          "field_b_term": "error-correcting code distance requirement for Byzantine agreement",
          "note": "3f+1 bound is tight; cryptographic authentication reduces to 2f+1 (authenticated Byzantine)"
        },
        {
          "field_a_term": "CAP consistency (linearizability)",
          "field_b_term": "sequential consistency / serializability in database transactions",
          "note": "linearizability is the strongest consistency model; TCC/EC are weaker alternatives"
        },
        {
          "field_a_term": "blockchain Nakamoto consensus (probabilistic finality)",
          "field_b_term": "Paxos/PBFT deterministic finality with 2f+1 quorum",
          "note": "Nakamoto consensus sacrifices deterministic safety for open membership and incentive compatibility"
        }
      ],
      "references": [
        {
          "doi": "10.1145/3149.214121",
          "note": "Fischer et al. (1985) Impossibility of distributed consensus with one faulty process. J ACM 32:374–382"
        },
        {
          "note": "Lamport (1998/2001) The Part-Time Parliament (Paxos). ACM Trans Comput Syst 16:133–169"
        },
        {
          "doi": "10.1145/357172.357176",
          "note": "Lamport, Shostak & Pease (1982) The Byzantine generals problem. ACM Trans Comput Syst 4:382–401"
        },
        {
          "note": "Brewer (2000) Towards robust distributed systems. PODC 2000 keynote; Gilbert & Lynch (2002) Brewers conjecture. ACM SIGACT News 33:51–59"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/engineering-computer-science/b-distributed-systems-consensus.yaml"
    },
    {
      "id": "b-skin-depth-shielding-x-financial-firewall-layers",
      "title": "Electromagnetic skin depth and layered shielding ↔ depth and segmentation of financial “firewalls” between institutions (engineering ↔ economics; analogy strength moderate)\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Good conductors attenuate time-harmonic fields exponentially with depth set by the skin depth delta ~ sqrt(2/(omega mu sigma)), so successive metal layers separated by gaps act as cascaded exponential attenuators. Financial regulation analogously stacks capital buffers, ring-fencing, and resolution ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-layered-em-shielding-financial-firewall-depth-ratio-analogy"
      ],
      "communication_gap": "Analogy strength: moderate. RF engineers quantify shielding with S-parameters and seam geometry; risk officers quantify loss given default and network contagion with different data. The exponential-decay picture is pedagogically useful but easy to overfit to balance-sheet narratives without empirical calibration. Specialists rarely share measurement protocols or uncertainty budgets across the boundary.\n",
      "translation_table": [
        {
          "field_a_term": "skin depth delta(omega)",
          "field_b_term": "effective shock penetration depth into a ring-fenced group",
          "note": "Higher frequency EM ↔ faster market shocks; deeper attenuation requires thicker “stack” or higher effective conductivity analog."
        },
        {
          "field_a_term": "surface impedance / boundary conditions at each interface",
          "field_b_term": "contractual triggers, collateral haircuts, and early-warning covenants",
          "note": "Discontinuities in material properties map loosely to step changes in loss-absorption rules."
        },
        {
          "field_a_term": "shielded-enclosure leakage through seams and apertures",
          "field_b_term": "residual exposure through off-balance-sheet vehicles and correlated counterparty channels",
          "note": "Dominant failure mode is often the gap, not bulk material—parallel to regulatory perimeter leaks."
        }
      ],
      "references": [
        {
          "doi": "10.1109/TEMC.2008.2005285",
          "note": "Konefal et al. (2009) — shielding effectiveness and practical enclosure leakage themes (methods anchor)."
        },
        {
          "doi": "10.1093/rfs/hhm043",
          "note": "Gai & Kapadia (2010) — financial network contagion thresholds (network-side anchor for the analogy)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/engineering-economics/b-skin-depth-shielding-x-financial-firewall-layers.yaml"
    },
    {
      "id": "b-microfluidics-stokes-flow-low-reynolds-number",
      "title": "Microfluidic devices operate in the low-Reynolds-number Stokes flow regime where viscosity dominates inertia, enabling exact analytical solutions (Stokes equations) and reversible, programmable flow patterns that are exploited in lab-on-a-chip technologies for biological assays.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "At Re ≪ 1 (typical microfluidic channels: Re ~ 10⁻³–10⁻¹), the Navier-Stokes equations reduce to the Stokes equations: η∇²u = ∇p, ∇·u = 0. These are linear and time-reversible (Purcell's scallop theorem). Exact solutions exist for arbitrary channel geometries via the Hele-Shaw approximation: u ≈ −(h",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-stokes-flow-deterministic-lateral-displacement-cell-sorting"
      ],
      "communication_gap": "Microfluidic engineers often use finite-element simulation (COMSOL) without deriving Stokes flow analytical solutions that would give clearer scaling laws; conversely, fluid mechanics theorists rarely engage with the specific constraints of PDMS fabrication and biological sample handling.\n",
      "translation_table": [
        {
          "field_a_term": "microfluidic channel flow (engineering)",
          "field_b_term": "Stokes flow / creeping flow (fluid mechanics)",
          "note": "Channel Re ~ 10⁻²; inertial terms (Re·Du/Dt) are negligible vs. viscous terms"
        },
        {
          "field_a_term": "droplet generation at T-junction (engineering)",
          "field_b_term": "Rayleigh-Plateau instability in Stokes regime (fluid mechanics)",
          "note": "Droplet size set by capillary number Ca = ηU/γ; Stokes flow controls pinch-off timing"
        },
        {
          "field_a_term": "Dean flow in curved microchannels (engineering)",
          "field_b_term": "secondary flow / inertial correction to Stokes (fluid mechanics)",
          "note": "Weak inertia (Re·δ/R) generates cross-stream Dean vortices used for cell focusing"
        },
        {
          "field_a_term": "electro-osmotic flow in microchannels (engineering)",
          "field_b_term": "Stokes equation with body force (Debye layer driving term) (fluid mechanics)",
          "note": "EOF obeys Helmholtz-Smoluchowski: u_EOF = −(εζ/η)E — a Stokes flow driven by electric body force"
        }
      ],
      "references": [
        {
          "doi": "10.1146/annurev.fluid.36.050802.122124",
          "note": "Squires & Quake (2005) — microfluidics — fluid physics at the nanoliter scale"
        },
        {
          "doi": "10.1073/pnas.97.7.3118",
          "note": "Whitesides & Stroock (2001) — flexible methods for microfluidics"
        },
        {
          "doi": "10.1073/pnas.97.9.5083",
          "note": "Stroock et al. (2002) — chaotic mixer for microchannels using herringbone ridges"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/engineering-fluid-mechanics/b-microfluidics-stokes-flow-low-reynolds-number.yaml"
    },
    {
      "id": "b-wind-turbine-betz-limit-actuator-disk",
      "title": "The Betz limit (C_P,max = 16/27 ≈ 59.3%) is the maximum fraction of wind kinetic energy extractable by an ideal actuator disk, derived from momentum theory for incompressible inviscid flow through a streamtube, and sets the theoretical upper bound on wind turbine power coefficient\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Actuator disk theory models a wind turbine as a permeable disk of area A that extracts momentum from a streamtube: applying conservation of mass, momentum, and energy to the upstream-disk-downstream control volume gives power coefficient C_P = 4a(1-a)² maximized at induction factor a = 1/3, yielding",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-betz-limit-array-cooperation-exceeds-individual"
      ],
      "communication_gap": "Wind energy engineers use C_P and Betz limit as empirical benchmarks while fluid mechanicists derive actuator disk theory from first principles; the extension of Betz theory to real turbines (Glauert tip losses, wake rotation, viscous effects) and to turbine arrays (wake interaction reducing collective efficiency) is active research at the fluid mechanics–engineering interface.\n",
      "translation_table": [
        {
          "field_a_term": "wind turbine power coefficient C_P = P/(½ρAV³) (engineering)",
          "field_b_term": "normalized energy extraction efficiency of an actuator disk in uniform flow (fluid mechanics)",
          "note": "C_P = 4a(1-a)² is the exact actuator disk result; modern turbines achieve C_P ≈ 0.45-0.50 in practice"
        },
        {
          "field_a_term": "axial induction factor a = (V_∞ - V_disk)/V_∞ (engineering)",
          "field_b_term": "fractional velocity deficit at the actuator disk plane (fluid mechanics)",
          "note": "a = 1/3 maximizes power extraction; a > 0.5 violates momentum theory (Betz limit region)"
        },
        {
          "field_a_term": "turbine thrust coefficient C_T = T/(½ρAV²) (engineering)",
          "field_b_term": "axial force on actuator disk from streamtube momentum flux (fluid mechanics)",
          "note": "C_T = 4a(1-a) relates to turbine structural loads; C_T and C_P are connected by C_P = C_T(1-a)"
        },
        {
          "field_a_term": "wake expansion behind turbine (engineering)",
          "field_b_term": "streamtube expansion in actuator disk far-wake (fluid mechanics)",
          "note": "Wake expands to area A_wake = A/(1-2a) at far wake; velocity deficit V_wake = V_∞(1-2a) by continuity"
        }
      ],
      "references": [
        {
          "doi": "10.1002/we.1496",
          "note": "Okulov & Sørensen (2010) - maximum efficiency of wind turbine rotors using Joukowsky and Betz approaches"
        },
        {
          "doi": "10.1017/jfm.2012.292",
          "note": "Sørensen (2011) - aerodynamic aspects of wind energy conversion"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/engineering-fluid-mechanics/b-wind-turbine-betz-limit-actuator-disk.yaml"
    },
    {
      "id": "b-geothermal-energy-subsurface-heat-transport",
      "title": "Geothermal energy extraction requires modeling subsurface heat and fluid transport governed by coupled thermoporoelastic equations, connecting reservoir engineering to geophysics and the mathematics of heat diffusion in fractured porous media.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A geothermal reservoir is described by Biot's thermoporoelastic theory: fluid pressure, temperature, and stress are coupled through Darcy flow (u = −(k/η)∇p), Fourier heat conduction (q = −λ∇T), and elastic equilibrium (∇·σ = 0). Fracture permeability dominates over matrix permeability (discrete fra",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-geothermal-doublet-thermal-lifetime-péclet-scaling"
      ],
      "communication_gap": "Geothermal engineers and geophysicists share coupled equations but work in separate professional communities (geothermal power industry vs. academic geophysics); induced seismicity risk from EGS has created regulatory challenges requiring closer collaboration between engineers optimizing for extraction and geophysicists studying fault mechanics.\n",
      "translation_table": [
        {
          "field_a_term": "geothermal reservoir permeability (engineering)",
          "field_b_term": "hydraulic conductivity tensor in Darcy flow (geophysics)",
          "note": "Fracture permeability k = w³/12 (cubic law) dominates matrix in crystalline rock"
        },
        {
          "field_a_term": "thermal breakthrough time (engineering)",
          "field_b_term": "Péclet number in advection-diffusion equation (geophysics)",
          "note": "High Pe → advection-dominated; thermal front moves with fluid velocity"
        },
        {
          "field_a_term": "enhanced geothermal system (EGS) hydraulic stimulation (engineering)",
          "field_b_term": "pore-pressure-induced fault reactivation / Mohr-Coulomb criterion (geophysics)",
          "note": "Fluid injection reduces effective normal stress: σ'_n = σ_n − p, triggering slip"
        },
        {
          "field_a_term": "doublet well configuration (engineering)",
          "field_b_term": "dipole source-sink in porous medium (geophysics)",
          "note": "Injection-production doublet creates a Rankine dipole flow field in the aquifer"
        }
      ],
      "references": [
        {
          "doi": "10.1016/j.geothermics.2010.09.005",
          "note": "Tester et al. (2006) — future of geothermal energy (MIT report on EGS)"
        },
        {
          "doi": "10.1007/BF01837056",
          "note": "Biot (1956) — thermoelasticity and irreversible thermodynamics in porous media"
        },
        {
          "doi": "10.1016/j.scitotenv.2018.07.098",
          "note": "Grigoli et al. (2018) — induced seismicity risk from geothermal stimulation"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/engineering-geophysics/b-geothermal-energy-subsurface-heat-transport.yaml"
    },
    {
      "id": "b-graph-transformer-x-grid-contingency-screening",
      "title": "Graph-transformer relational attention bridges power-grid topology reasoning and fast contingency screening under N-1 constraints.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy (to be empirically validated): Graph-transformer attention can approximate contingency ranking functions similarly to fast security-assessment heuristics derived from network sensitivity factors in grid operations.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-graph-transformer-improves-grid-contingency-screening-recall"
      ],
      "communication_gap": "Domain operators prioritize interpretable reliability diagnostics, while ML work often prioritizes aggregate accuracy without deployment-grade uncertainty audits.",
      "translation_table": [
        {
          "field_a_term": "model prior",
          "field_b_term": "domain prior",
          "note": "Both constrain inference in data-sparse regimes."
        },
        {
          "field_a_term": "uncertainty estimate",
          "field_b_term": "risk-aware decision support",
          "note": "Uncertainty quality determines practical utility."
        },
        {
          "field_a_term": "out-of-distribution behavior",
          "field_b_term": "deployment robustness",
          "note": "Shift sensitivity governs real-world reliability."
        }
      ],
      "references": [
        {
          "arxiv": "2012.09699",
          "note": "Graph Transformer architecture."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/engineering-machine-learning/b-graph-transformer-x-grid-contingency-screening.yaml"
    },
    {
      "id": "b-air-traffic-control-queueing-theory",
      "title": "Air traffic control capacity and delay are governed by queueing theory, with runway throughput following Little's law (L = lambda * W) and delay scaling nonlinearly with utilisation via the Pollaczek-Khinchine formula — making airport capacity management a direct engineering application of stochastic process theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "An airport runway is a single-server queue: arriving aircraft (customers) are served at rate mu (landings/hour), and arrivals follow a Poisson process at rate lambda. Queueing theory provides exact results: Little's law L = lambda * W relates the average number of aircraft in the holding pattern to ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "",
      "translation_table": [
        {
          "field_a_term": "arrival rate lambda (Poisson process)",
          "field_b_term": "aircraft landing request rate (flights per hour)",
          "note": "Poisson arrival is a good approximation for unscheduled general aviation"
        },
        {
          "field_a_term": "service rate mu (exponential or general)",
          "field_b_term": "runway capacity (operations per hour under standard separation)",
          "note": "mu depends on wake vortex separation requirements — heavy/medium/light aircraft mix"
        },
        {
          "field_a_term": "utilisation rho = lambda/mu",
          "field_b_term": "runway load factor (fraction of capacity used)",
          "note": "Delay ~ rho^2 / (1 - rho); above ~85% utilisation, delays grow faster than linear"
        },
        {
          "field_a_term": "Little's law: L = lambda * W",
          "field_b_term": "holding stack size = arrival rate times average holding time",
          "note": "A model-free, distribution-free result — applies to all queuing disciplines"
        }
      ],
      "references": [
        {
          "doi": "10.1287/opre.9.3.383",
          "note": "Little (1961) — a proof for the queuing formula L = lambda W"
        },
        {
          "doi": "10.1145/3338765",
          "note": "Cao et al. (2019) — air traffic management and queueing theory"
        },
        {
          "doi": "10.1287/trsc.2013.0478",
          "note": "Jacquillat & Odoni (2015) — an integrated scheduling and operations approach to airport congestion mitigation"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/engineering-mathematics/b-air-traffic-control-queueing-theory.yaml"
    },
    {
      "id": "b-control-theory-differential-geometry",
      "title": "Modern nonlinear control theory is formulated on differential manifolds — controllability is determined by the Lie bracket structure of vector fields (Chow-Rashevsky theorem), optimal trajectories are geodesics on sub-Riemannian manifolds, and robotics kinematics is fibre bundle theory — making differential geometry the natural language of nonlinear systems engineering.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Classical linear control theory (state-space, Kalman, LQR) operates on ℝⁿ with no geometric structure. From the 1960s onward, Pontryagin, Brockett, Sussmann, Jurdjevic, and others reformulated nonlinear control on smooth manifolds, revealing deep connections to differential geometry:\n1. Chow-Rashevs",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-lie-bracket-depth-complexity-robot-planning"
      ],
      "communication_gap": "Differential geometers and control engineers publish in Mathematics journals (J Differential Geometry, Annals of Mathematics) and IEEE/SIAM control journals (IEEE TAC, SIAM J Control Optim) respectively with limited cross-reading. Brockett (Harvard) and Jurdjevic (Toronto) bridged these communities from the 1970s onward, but geometric control remains a specialised subfield. Most robotics engineers use numerical Jacobian methods without awareness of the underlying fibre bundle structure; most differential geometers are unaware that their sub-Riemannian geodesics are robot paths.\n",
      "translation_table": [
        {
          "field_a_term": "state space ℝⁿ (linear control)",
          "field_b_term": "smooth manifold M (nonlinear control / differential geometry)"
        },
        {
          "field_a_term": "controllability matrix rank condition (linear systems)",
          "field_b_term": "Lie algebra rank condition (Chow-Rashevsky theorem)"
        },
        {
          "field_a_term": "Lie bracket [f, g] (vector fields)",
          "field_b_term": "commutator of robot joint motions (robotics)"
        },
        {
          "field_a_term": "optimal trajectory (PMP)",
          "field_b_term": "geodesic on sub-Riemannian manifold"
        },
        {
          "field_a_term": "Jacobian matrix J (robotics kinematics)",
          "field_b_term": "connection on fibre bundle (differential geometry)"
        },
        {
          "field_a_term": "singular configuration (robot kinematics)",
          "field_b_term": "connection singularity / curvature singularity"
        },
        {
          "field_a_term": "geometric phase / holonomy (Berry phase, quantum mechanics)",
          "field_b_term": "end-effector rotation from closed joint-space loop (robotics)"
        }
      ],
      "references": [
        {
          "url": "https://www.taylorfrancis.com/books/mono/10.1201/9780203758847/nonholonomic-mechanics-control-anthony-bloch",
          "note": "Pontryagin et al. (1962) Mathematical Theory of Optimal Processes — Wiley (PMP original)"
        },
        {
          "doi": "10.1137/0310021",
          "note": "Brockett (1972) System theory on group manifolds and coset spaces, SIAM J Control 10:265"
        },
        {
          "url": "https://link.springer.com/book/10.1007/978-0-387-21791-9",
          "note": "Bloch (2003) Nonholonomic Mechanics and Control — Springer Applied Mathematical Sciences"
        },
        {
          "url": "https://www.ams.org/books/surv/091/",
          "note": "Montgomery (2002) A Tour of Subriemannian Geometries, Their Geodesics and Applications — AMS"
        },
        {
          "doi": "10.1073/pnas.1019272108",
          "note": "Sachkov (2011) Control theory on Lie groups, J Math Sci — review of geometric control"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/engineering-mathematics/b-control-theory-differential-geometry.yaml"
    },
    {
      "id": "b-control-theory-lie-groups",
      "title": "The geometric structure of nonlinear control systems on Lie groups — characterised by the Chow-Rashevski theorem via the Lie algebra rank condition — provides the correct framework for robotic motion planning and spacecraft attitude control, replacing Euclidean linearisation methods that fail for large-angle maneuvers.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Classical linear control theory (PID, LQR, Kalman filter) works in Euclidean spaces (ℝⁿ) where linear approximations remain valid near an operating point. For robotic systems and spacecraft, the configuration space is not Euclidean but a Lie group: the group SE(3) = SO(3) ⋉ ℝ³ of rigid body motions ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-geometric-control-se3-optimal-robotic-grasping"
      ],
      "communication_gap": "Control theory and Lie group mathematics developed in separate communities. Lie groups were studied by mathematicians (Sophus Lie, Cartan, Weyl) and theoretical physicists (gauge theory, particle physics). Engineering control theorists (Kalman, Zames, Åström) worked predominantly in linear systems and Euclidean state spaces. Brockett (1972) first explicitly connected Lie brackets to controllability for engineers, but the resulting literature (geometric mechanics, Bullo & Lewis 2005) remained in academic robotics. Practising control engineers in aerospace and robotics industry primarily still use quaternion-based or Euler angle methods, despite their theoretical limitations.\n",
      "translation_table": [
        {
          "field_a_term": "Lie group G (configuration space of mechanical system)",
          "field_b_term": "state space X of control system"
        },
        {
          "field_a_term": "Lie algebra g (tangent space at identity)",
          "field_b_term": "linearised control inputs at operating point"
        },
        {
          "field_a_term": "Lie bracket [f, g] (commutator of vector fields)",
          "field_b_term": "second-order maneuver achievable by combining first-order inputs"
        },
        {
          "field_a_term": "Chow-Rashevski theorem (Lie algebra rank condition)",
          "field_b_term": "controllability condition for nonlinear systems"
        },
        {
          "field_a_term": "exponential map exp: g → G",
          "field_b_term": "finite rotation from infinitesimal generator (ωt)"
        },
        {
          "field_a_term": "adjoint action Ad_g: g → g",
          "field_b_term": "coordinate transformation of velocity under configuration change"
        },
        {
          "field_a_term": "geodesic on Lie group (optimal path under left-invariant metric)",
          "field_b_term": "time-optimal or energy-optimal control trajectory"
        },
        {
          "field_a_term": "nonholonomic constraint (rolling without slipping)",
          "field_b_term": "system with fewer control inputs than state dimensions"
        }
      ],
      "references": [
        {
          "note": "Chow (1939) Math Ann 117:98 — Chow's theorem on nonholonomic controllability"
        },
        {
          "doi": "10.1137/0310021",
          "note": "Brockett (1972) SIAM J Control 10:265 — system theory on Lie groups"
        },
        {
          "note": "Bullo & Lewis (2005) Geometric Mechanics and Robotics. Springer"
        },
        {
          "note": "Jurdjevic (1997) Geometric Control Theory. Cambridge University Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/engineering-mathematics/b-control-theory-lie-groups.yaml"
    },
    {
      "id": "b-fiber-optics-nonlinear-schrodinger-equation",
      "title": "Pulse propagation in optical fibers is governed by the nonlinear Schrödinger equation (NLSE), whose exact soliton solutions explain the dispersion-canceling pulses used in long-haul fiber optic communications, connecting photonics engineering to integrable systems mathematics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The envelope of an optical pulse in a fiber obeys the NLSE: i∂A/∂z = (β₂/2)∂²A/∂t² − γ|A|²A, where β₂ is group-velocity dispersion and γ is the nonlinear coefficient. This equation is exactly integrable via the inverse scattering transform (Zakharov-Shabat); its soliton solutions |A|² = P·sech²(t/T₀",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-inverse-scattering-fiber-transmission-capacity"
      ],
      "communication_gap": "Fiber optic engineers and mathematicians working on integrable systems use the same NLSE but rarely interact; optical engineers typically use split-step numerical methods while mathematicians develop exact inverse-scattering solutions, and the engineering implications of integrability (eigenvalue communication) are just entering practice.\n",
      "translation_table": [
        {
          "field_a_term": "group-velocity dispersion β₂ (fiber optics)",
          "field_b_term": "dispersive term in NLSE (mathematics)",
          "note": "Anomalous dispersion (β₂ < 0) enables bright solitons; normal dispersion gives dark solitons"
        },
        {
          "field_a_term": "self-phase modulation / Kerr nonlinearity (fiber optics)",
          "field_b_term": "cubic nonlinear term in NLS (mathematics)",
          "note": "Kerr effect ∝ n₂|E|² — exact mathematical analogue of the focusing NLS nonlinearity"
        },
        {
          "field_a_term": "optical soliton (fiber optics)",
          "field_b_term": "soliton solution of focusing NLS / inverse scattering eigenvalue (mathematics)",
          "note": "Each soliton corresponds to a bound-state eigenvalue of the Zakharov-Shabat operator"
        },
        {
          "field_a_term": "modulation instability / optical rogue wave (fiber optics)",
          "field_b_term": "Benjamin-Feir instability / Peregrine soliton (mathematics)",
          "note": "MI is the NLS analogue of deep-water wave instability; Peregrine breathers model rogue waves"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.30.829",
          "note": "Hasegawa & Tappert (1973) — prediction of optical solitons in fibers"
        },
        {
          "doi": "10.1007/BF01208265",
          "note": "Zakharov & Shabat (1972) — exact theory of the two-dimensional self-focusing"
        },
        {
          "doi": "10.1038/450054a",
          "note": "Mollenauer et al. (2006) — soliton transmission in telecommunications fibers"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/engineering-mathematics/b-fiber-optics-nonlinear-schrodinger-equation.yaml"
    },
    {
      "id": "b-finite-element-method-pde",
      "title": "The finite element method is the engineering realization of the mathematical Galerkin variational principle — converting PDEs into solvable algebraic systems through Sobolev-space approximation theory",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The finite element method (FEM) bridges abstract PDE theory and engineering computation. The weak (variational) form ∫_Ω ∇u·∇v dΩ = ∫_Ω fv dΩ for all test functions v transforms the strong-form PDE into an equivalent minimization problem — grounded in the Lax-Milgram theorem (existence/uniqueness in",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-isogeometric-analysis-superior-convergence-thin-shells"
      ],
      "communication_gap": "Applied mathematicians study FEM convergence theory in journals like Numerische Mathematik and SIAM Journal on Numerical Analysis; engineers use FEM in COMSOL, ANSYS, and Abaqus while reading Engineering Fracture Mechanics and Journal of Computational Physics. The mathematical foundations are rarely taught in engineering FEM courses; engineers perform mesh convergence studies without knowing Cea's lemma. This causes underuse of mathematically proven adaptive strategies.\n",
      "translation_table": [
        {
          "field_a_term": "variational principle (mathematics)",
          "field_b_term": "principle of virtual work (engineering mechanics)",
          "note": "The Galerkin weak form is exactly the virtual work equation used in structural mechanics"
        },
        {
          "field_a_term": "Sobolev space H^1(Ω) (functional analysis)",
          "field_b_term": "finite energy displacement field (engineering)",
          "note": "The mathematical space of square-integrable functions with square-integrable gradient = physically admissible displacements"
        },
        {
          "field_a_term": "stiffness matrix K (FEM)",
          "field_b_term": "structural stiffness (mechanical engineering)",
          "note": "Same object — K encodes how much force is needed per unit displacement, discretized over elements"
        },
        {
          "field_a_term": "Cea's lemma error estimate",
          "field_b_term": "mesh convergence study (engineering validation)",
          "note": "Engineers check h-refinement convergence empirically; Cea's lemma is the mathematical guarantee behind it"
        },
        {
          "field_a_term": "NURBS basis functions (isogeometric analysis)",
          "field_b_term": "CAD spline geometry (engineering design)",
          "note": "Isogeometric analysis uses the same splines for analysis and design — eliminating mesh generation"
        }
      ],
      "references": [
        {
          "note": "Courant (1943). Variational methods for the solution of problems of equilibrium and vibrations. Bull Am Math Soc 49:1."
        },
        {
          "note": "Babuška & Aziz (1972). The Mathematical Foundations of the Finite Element Method with Applications to Partial Differential Equations. Academic Press."
        },
        {
          "note": "Hughes, T.J.R. (2000). The Finite Element Method: Linear Static and Dynamic Finite Element Analysis. Dover."
        },
        {
          "note": "Cottrell, Hughes & Bazilevs (2009). Isogeometric Analysis: Toward Integration of CAD and FEA. Wiley."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/engineering-mathematics/b-finite-element-method-pde.yaml"
    },
    {
      "id": "b-finite-element-x-discrete-exterior-calculus",
      "title": "Finite element exterior calculus and discrete exterior calculus provide structure-preserving discretizations of Hodge theory, unifying mixed FEM stability with geometric discretization.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Partial differential equations on manifolds involving div, grad, and curl fit into de Rham complexes; stable mixed finite elements (Raviart–Thomas, Nedelec) construct discrete complexes that commute with projections. Discrete exterior calculus assigns discrete forms to mesh elements and defines disc",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-mixed-fem-for-hodge-laplace-matches-dec-upwind-schemes"
      ],
      "communication_gap": "Engineering FEM courses emphasize patch tests; mathematics FEEC courses emphasize complexes. Practitioners may not recognize they already implement DEC-adjacent schemes.",
      "translation_table": [
        {
          "field_a_term": "mixed variational form",
          "field_b_term": "pairing of k-forms and (k−1)-forms on a mesh"
        },
        {
          "field_a_term": "inf-sup stability",
          "field_b_term": "closed range of discrete exterior derivative"
        },
        {
          "field_a_term": "Nedelec elements",
          "field_b_term": "discrete 1-forms for curl-curl problems"
        }
      ],
      "references": [
        {
          "doi": "10.1090/S0025-5718-10-02339-2",
          "note": "Arnold, Falk & Winther (2010) — finite element exterior calculus foundation"
        },
        {
          "doi": "10.1016/j.cma.2006.12.009",
          "note": "Hirani (2003) thesis widely cited; see also DEC formulations in CMAME cluster for discrete forms"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/engineering-mathematics/b-finite-element-x-discrete-exterior-calculus.yaml"
    },
    {
      "id": "b-graph-algorithms-network-optimization",
      "title": "Graph theory provides the mathematical foundation for network optimization in engineering: Dijkstra's shortest path, the max-flow min-cut theorem, and the traveling salesman problem's Christofides approximation translate directly into GPS routing, logistics supply chains, VLSI circuit routing, and telecommunications network design.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Graph algorithms represent one of the most direct translations of mathematical theory into engineering practice:\nShortest path: Dijkstra (1959) — O(E log V) with binary heap for non-negative edge weights, solving the single-source shortest path problem exactly. Bellman-Ford (1958) — O(VE), handles n",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-christofides-tight-example-construction"
      ],
      "communication_gap": "Graph algorithms developed in both mathematics (combinatorics, graph theory) and computer science (algorithms and complexity). Operations research adopted these results but maintains separate journals (Management Science, Operations Research) from theoretical computer science (STOC, FOCS). Engineering practice often uses proprietary solver implementations (CPLEX, Gurobi) without direct engagement with the underlying algorithmic literature. The 2021 Karlin-Klein-Gharan improvement on Christofides was a 47-year-delayed breakthrough that received little engineering press coverage despite its foundational implications.\n",
      "translation_table": [
        {
          "field_a_term": "graph G = (V, E) with edge weights w(e)",
          "field_b_term": "transportation network (nodes = cities/hubs, edges = routes, weights = cost/distance)",
          "note": "graph abstraction loses geographic information but gains algorithmic tractability"
        },
        {
          "field_a_term": "Dijkstra's shortest path (greedy, priority queue)",
          "field_b_term": "GPS route planning (A* with Euclidean distance heuristic)",
          "note": "A* is Dijkstra + heuristic; same O(E log V) complexity, much faster in practice"
        },
        {
          "field_a_term": "max-flow min-cut theorem (network flow = minimum separator)",
          "field_b_term": "network capacity planning (bottleneck link = minimum cut)",
          "note": "min-cut identifies the bandwidth bottleneck; max-flow gives achievable throughput"
        },
        {
          "field_a_term": "minimum spanning tree (MST)",
          "field_b_term": "network cabling / backbone design (minimize wire cost, maintain connectivity)",
          "note": "MST minimizes total edge weight while keeping graph connected — direct cable cost model"
        },
        {
          "field_a_term": "TSP (find minimum-weight Hamiltonian cycle)",
          "field_b_term": "vehicle routing / delivery optimization (minimize fuel cost over all stops)",
          "note": "real logistics uses VRPTW (TSP + time windows + multiple vehicles); NP-hard core"
        },
        {
          "field_a_term": "bipartite matching (max matching via augmenting paths)",
          "field_b_term": "job assignment / shift scheduling (workers ↔ tasks)",
          "note": "König's theorem: max matching = min vertex cover in bipartite graphs"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF01386390",
          "note": "Dijkstra (1959) A note on two problems in connexion with graphs; Numer Math 1:269"
        },
        {
          "doi": "10.4153/CJM-1956-045-5",
          "note": "Ford & Fulkerson (1956) Maximal flow through a network; Can J Math 8:399"
        },
        {
          "doi": "10.1007/BF01447886",
          "note": "Menger (1927) Zur allgemeinen Kurventheorie; Math Ann 96:154"
        },
        {
          "note": "Christofides (1976) Worst-case analysis of a new heuristic for the travelling salesman problem; Carnegie Mellon University Report"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/engineering-mathematics/b-graph-algorithms-network-optimization.yaml"
    },
    {
      "id": "b-information-theory-data-compression",
      "title": "Shannon's source coding theorem establishes that the entropy H of a source is the fundamental limit of lossless compression, while rate-distortion theory provides the optimal lossy compression bound R(D) — limits that Huffman coding, arithmetic coding, and Lempel-Ziv algorithms approach through distinct mathematical strategies, and that JPEG/MP3 operate near in practice.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Shannon's source coding theorem (1948) proves that a source with entropy H bits/ symbol can be losslessly compressed to H bits/symbol on average but not below — setting an absolute mathematical lower bound on any lossless compression algorithm. Huffman coding (1952) achieves the optimal prefix-free ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-shannon-optimal-compression-biological-codes"
      ],
      "communication_gap": "Shannon's original papers are mathematically dense and rarely read by working software engineers. The connection between the mathematical rate-distortion theorem and the engineering design of codecs like HEVC or Opus is mediated by decades of lossy compression research that is not taught in either pure mathematics or standard computer science curricula. Information theorists who prove new bounds rarely implement codecs; codec engineers rarely read Shannon's original 1959 rate-distortion paper. This gap has slowed the adoption of theoretically optimal approaches (e.g., asymmetric numeral systems replacing Huffman in modern codecs).\n",
      "translation_table": [
        {
          "field_a_term": "entropy H(X) = -Σ p(x) log p(x)",
          "field_b_term": "theoretical minimum bits/symbol for lossless compression",
          "note": "Shannon proved no compressor can beat H bits/symbol on average"
        },
        {
          "field_a_term": "prefix-free (Kraft inequality) code",
          "field_b_term": "Huffman code tree",
          "note": "Huffman's greedy algorithm achieves the optimal prefix-free code with expected length < H+1"
        },
        {
          "field_a_term": "universal source coding",
          "field_b_term": "Lempel-Ziv algorithms (LZ77, LZ78)",
          "note": "LZ algorithms adapt to unknown source statistics and converge to H asymptotically"
        },
        {
          "field_a_term": "rate-distortion function R(D)",
          "field_b_term": "compression ratio vs. perceptual quality tradeoff in JPEG/MP3",
          "note": "The R(D) curve is the Pareto frontier that real codecs attempt to approach"
        },
        {
          "field_a_term": "mutual information I(X;X̂)",
          "field_b_term": "information retained after lossy coding",
          "note": "Rate-distortion minimizes mutual information subject to a distortion constraint"
        },
        {
          "field_a_term": "channel capacity C",
          "field_b_term": "maximum throughput of a compressed data channel",
          "note": "Source coding (compression) and channel coding (error correction) together achieve C − H overhead"
        }
      ],
      "references": [
        {
          "note": "Shannon (1948) — A Mathematical Theory of Communication",
          "doi": "10.1002/j.1538-7305.1948.tb01338.x"
        },
        {
          "note": "Huffman (1952) — A Method for the Construction of Minimum-Redundancy Codes",
          "doi": "10.1109/JRPROC.1952.273898"
        },
        {
          "note": "Ziv & Lempel (1977) — universal algorithm for sequential data compression",
          "doi": "10.1109/TIT.1977.1055714"
        },
        {
          "note": "Shannon (1959) — Coding theorems for a discrete source with a fidelity criterion",
          "doi": "10.1002/j.1538-7305.1959.tb03905.x"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/engineering-mathematics/b-information-theory-data-compression.yaml"
    },
    {
      "id": "b-lidar-x-inverse-problems",
      "title": "LiDAR point clouds are discrete samples of a scene geometry obtained by solving ranging inverse problems — echo timing and beam spreading couple engineering sensing to geometric tomography.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A LiDAR system estimates range by relating emitted pulse travel time (and waveform shape for full-waveform systems) to distance, under assumptions about scattering and noise. Reconstructing surfaces, canopy profiles, or urban meshes from point sets is ill-posed: infinitely many continuous surfaces e",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-sparsity-priors-stabilize-lidar-surface-recovery"
      ],
      "communication_gap": "Geomatics workflows emphasize heuristics and SLAM; inverse-problems theory is often black-boxed in software stacks, limiting uncertainty certificates.",
      "translation_table": [
        {
          "field_a_term": "forward operator (wave propagation + detection)",
          "field_b_term": "LiDAR range equation with beam convolution"
        },
        {
          "field_a_term": "Tikhonov regularization",
          "field_b_term": "surface smoothing penalties in mesh fitting"
        },
        {
          "field_a_term": "non-uniqueness / nullspace",
          "field_b_term": "occlusions and missing returns creating geometric ambiguities"
        }
      ],
      "references": [
        {
          "doi": "10.1109/TGRS.2004.839552",
          "note": "Representative full-waveform / LiDAR signal processing bridge paper in remote sensing"
        },
        {
          "doi": "10.1029/2001RS002475",
          "note": "Harding et al. (2001) — airborne LiDAR vegetation mapping methodology (geometric inverse flavor)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/engineering-mathematics/b-lidar-x-inverse-problems.yaml"
    },
    {
      "id": "b-numerical-methods-scientific-computing",
      "title": "Numerical Methods and Scientific Computing — finite differences, Runge-Kutta, Krylov solvers, and GPU acceleration form the computational backbone of climate models, CFD, and AI training",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Scientific computing converts continuous differential equations into discrete approximations solvable by digital computers. The finite difference method (FDM) approximates spatial derivatives: ∂u/∂x ≈ (u_{i+1}−u_{i-1})/2h (centred difference, 2nd order O(h²)); ∂²u/∂x² ≈ (u_{i+1}−2uᵢ+u_{i-1})/h². Von",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Numerical analysts (mathematics departments), computational scientists (physics, engineering), and ML engineers (CS departments) have largely parallel literatures on iterative methods, time integration, and matrix factorisation. GPU programming (CUDA) is primarily a CS/engineering skill but determines the practical performance of all scientific simulation codes. Climate scientists who use numerical models rarely engage with the theoretical numerical analysis literature; numerical analysts rarely validate methods on real PDE applications. High-performance computing education remains scattered across disciplines.\n",
      "translation_table": [
        {
          "field_a_term": "CFL stability condition Δt ≤ Δx/c",
          "field_b_term": "maximum timestep for stable explicit time-stepping of wave equation",
          "note": "Implicit methods (Crank-Nicolson, backward Euler) are unconditionally stable but require solving a linear system each step"
        },
        {
          "field_a_term": "order of accuracy O(h^p) of a finite difference scheme",
          "field_b_term": "rate at which numerical error decreases with grid refinement",
          "note": "Second-order centred differences converge as h²; halving grid spacing quarters the error"
        },
        {
          "field_a_term": "Runge-Kutta 4 (4 stage, 4th order)",
          "field_b_term": "gold-standard ODE integrator balancing accuracy and computational cost",
          "note": "RK4: 4 function evaluations per step; Dormand-Prince adds 5th-order correction for adaptive stepping at 6 evaluations"
        },
        {
          "field_a_term": "Krylov subspace iteration (GMRES/CG)",
          "field_b_term": "scalable iterative linear solver for PDE-derived sparse systems",
          "note": "GMRES guaranteed to converge in at most n steps but practical convergence in sqrt(kappa) steps with preconditioner"
        },
        {
          "field_a_term": "condition number kappa = ||A|| ||A^{-1}||",
          "field_b_term": "sensitivity of solution to perturbations; governs iterative solver convergence",
          "note": "Ill-conditioned systems (high kappa) amplify numerical errors; preconditioning constructs M approx A^{-1} to reduce effective kappa"
        },
        {
          "field_a_term": "GPU SIMT execution (single instruction multiple threads)",
          "field_b_term": "massively parallel arithmetic for dense matrix operations",
          "note": "cuBLAS achieves ~20 TFLOP/s on single A100 GPU for FP16 matrix multiplication; 1000× serial CPU throughput"
        }
      ],
      "references": [
        {
          "note": "Courant, Friedrichs & Lewy (1928) Math Ann 100:32 — CFL stability condition"
        },
        {
          "doi": "10.1016/0771-050X(80)90013-3",
          "note": "Dormand & Prince (1980) J Comput Appl Math 6:19 — embedded RK pair for adaptive stepping"
        },
        {
          "doi": "10.1137/0907058",
          "note": "Saad & Schultz (1986) SIAM J Sci Stat Comput 7:856 — GMRES algorithm"
        },
        {
          "doi": "10.1109/MM.2008.31",
          "note": "Nickolls et al. (2008) IEEE Micro 28:39 — CUDA parallel computing architecture"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/engineering-mathematics/b-numerical-methods-scientific-computing.yaml"
    },
    {
      "id": "b-optimization-algorithms-convex-analysis",
      "title": "Gradient descent and its variants (Nesterov acceleration, proximal methods, ADMM) derive their convergence guarantees from convex analysis: O(1/t) for convex, O(exp(-t)) for strongly convex, and optimal O(1/t²) for Nesterov momentum — unifying engineering optimization with mathematical analysis of convex functions.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Gradient descent x_{t+1} = x_t - η∇f(x_t) converges at rate O(1/t) for L-smooth convex f (Lipschitz gradient, ‖∇f(x)-∇f(y)‖ ≤ L‖x-y‖) and at rate O(exp(-μt/L)) for μ-strongly convex f (where μ = σ_min(∇²f)). Nesterov's accelerated gradient method (1983) achieves the optimal rate O(L/t²) for smooth c",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-distribution-shift-invariant-risk-minimization"
      ],
      "communication_gap": "Nesterov (1983) published the accelerated gradient method in a Russian-language journal (Doklady Akademii Nauk) and the result was unknown in Western machine learning until the late 2000s. Boyd et al. (2011) ADMM monograph in Foundations and Trends made convex optimization accessible to engineers and machine learning practitioners without background in convex analysis. Rockafellar (1970) remains the definitive mathematical reference but is rarely cited by machine learning engineers.\n",
      "translation_table": [
        {
          "field_a_term": "gradient ∇f(x) (smooth function derivative)",
          "field_b_term": "subgradient ∂f(x) (set-valued, for non-smooth convex f)",
          "note": "subgradient extends gradient to non-differentiable points; ∂(‖x‖₁) = sign(x) at non-zero"
        },
        {
          "field_a_term": "Lipschitz continuity of gradient (L-smoothness)",
          "field_b_term": "bound on curvature of f; enables step size η = 1/L convergence guarantee",
          "note": "L-smoothness is the key regularity condition that makes gradient descent's O(1/t) rate provable"
        },
        {
          "field_a_term": "strong convexity parameter μ (‖x-y‖² ≤ f(x)-f(y)...)",
          "field_b_term": "condition number κ = L/μ (determines convergence rate)",
          "note": "well-conditioned problems (small κ) converge exponentially; ill-conditioned problems need preconditioning"
        },
        {
          "field_a_term": "proximal operator prox_{τg}(x) (denoising step)",
          "field_b_term": "projection onto convex set C (special case g = indicator of C)",
          "note": "proximal operator generalizes projection; equals soft thresholding for L1 regularization"
        },
        {
          "field_a_term": "Lagrangian duality gap = 0 for convex programs (Slater's condition)",
          "field_b_term": "strong duality (primal = dual optimal value)",
          "note": "strong duality fails for non-convex programs; convexity is sufficient for zero duality gap"
        }
      ],
      "references": [
        {
          "note": "Nesterov (1983) — A method for solving convex programming problems with convergence rate O(1/k²); Doklady AN USSR 269:543"
        },
        {
          "doi": "10.1561/2200000016",
          "note": "Boyd et al. (2011) — Distributed optimization and statistical learning via the ADMM; Found Trends Mach Learn 3:1"
        },
        {
          "doi": "10.1561/2400000003",
          "note": "Parikh & Boyd (2014) — Proximal algorithms; Found Trends Optim 1:127"
        },
        {
          "note": "Rockafellar (1970) — Convex Analysis; Princeton University Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/engineering-mathematics/b-optimization-algorithms-convex-analysis.yaml"
    },
    {
      "id": "b-signal-processing-fourier-analysis",
      "title": "Signal processing is applied Fourier analysis — the FFT, Nyquist theorem, and filter design are engineering implementations of mathematical harmonic analysis",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "All of modern signal processing rests on the Fourier transform F(ω) = ∫f(t)e^{-iωt}dt, which decomposes any signal into frequency components. The convolution theorem (convolution in time = multiplication in frequency) makes filter design algebraic in frequency space. The Cooley-Tukey FFT (1965) comp",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-compressed-sensing-mri-fourier-sparsity"
      ],
      "communication_gap": "Electrical engineering and signal processing education teaches the Fourier transform as a computational tool (FFT algorithms) without the mathematical foundations (L² spaces, distributions, harmonic analysis). Mathematicians who developed the Plancherel theorem and distribution theory rarely engage with engineering applications. The gap means engineers lack tools to extend beyond classical Fourier (e.g., non-commutative Fourier analysis for non-Euclidean domains like graphs and manifolds).\n",
      "translation_table": [
        {
          "field_a_term": "Fourier transform F(omega) = integral f(t) exp(-i omega t) dt",
          "field_b_term": "signal decomposition into frequency components",
          "note": "Engineering uses DFT/FFT; mathematics studies L^2(R) and distribution theory"
        },
        {
          "field_a_term": "convolution theorem (f*g <-> F*G in frequency domain)",
          "field_b_term": "linear time-invariant filter design",
          "note": "Filtering = multiplication in frequency domain; LTI systems have transfer functions H(omega)"
        },
        {
          "field_a_term": "Nyquist-Shannon sampling theorem",
          "field_b_term": "digital signal reconstruction from discrete samples",
          "note": "Sampling at f_s > 2*f_max guarantees perfect reconstruction; aliasing occurs below Nyquist rate"
        },
        {
          "field_a_term": "Parseval's theorem (energy conservation in Fourier domain)",
          "field_b_term": "power spectral density (energy distribution across frequencies)",
          "note": "Total signal energy = integral of power spectral density"
        },
        {
          "field_a_term": "uncertainty principle Delta_t * Delta_omega >= 1/2",
          "field_b_term": "time-frequency resolution tradeoff in spectrograms and wavelets",
          "note": "Perfect time localization and perfect frequency localization are mutually exclusive"
        }
      ],
      "references": [
        {
          "note": "Fourier, J.B.J. (1822). Théorie analytique de la chaleur. Paris: Firmin Didot."
        },
        {
          "doi": "10.1090/S0025-5718-1965-0178586-1",
          "note": "Cooley & Tukey (1965). An algorithm for the machine calculation of complex Fourier series. Math Comput 19:297."
        },
        {
          "doi": "10.1109/JRPROC.1949.232969",
          "note": "Shannon, C.E. (1949). Communication in the presence of noise. Proc IRE 37:10."
        },
        {
          "note": "Oppenheim, A.V. & Schafer, R.W. (2010). Discrete-Time Signal Processing, 3rd ed. Prentice Hall."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/engineering-mathematics/b-signal-processing-fourier-analysis.yaml"
    },
    {
      "id": "b-traffic-flow-lwr-pde",
      "title": "The Lighthill-Whitham-Richards (LWR) traffic flow model treats vehicle density as a conserved quantity obeying a first-order hyperbolic PDE, predicting shock wave formation, traffic jam propagation speed, and stop-and-go wave dynamics using fluid mechanical methods",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Vehicle traffic obeys the conservation law d_rho/d_t + d_q/d_x = 0 where q = rho * v(rho) is the flow-density fundamental diagram, generating shock waves (traffic jams) that propagate at the Rankine-Hugoniot speed w = (q_R - q_L)/(rho_R - rho_L), with rarefaction waves describing acceleration out of",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Traffic engineers use empirical fundamental diagrams and microsimulation while applied mathematicians study conservation laws; the formal connection between measured flow-density curves and LWR shock wave theory is established in transportation research but rarely taught as part of fluid mechanics curricula.",
      "translation_table": [
        {
          "field_a_term": "vehicle density rho(x,t) [vehicles/km]",
          "field_b_term": "conserved density in inviscid Burgers/LWR equation",
          "note": "Vehicle count is conserved (no creation/annihilation on freeway); rho satisfies a conservation law"
        },
        {
          "field_a_term": "fundamental diagram q = rho * v(rho)",
          "field_b_term": "flux function F(rho) in hyperbolic conservation law",
          "note": "Greenshields q = v_f * rho * (1 - rho/rho_max); Greenberg, Newell-Daganzo fundamental diagrams differ"
        },
        {
          "field_a_term": "traffic shock wave (stop-and-go jam)",
          "field_b_term": "Rankine-Hugoniot discontinuity in density field",
          "note": "Jam propagates upstream at w = Delta_q / Delta_rho; typical w ~ -20 km/h for freeway traffic"
        },
        {
          "field_a_term": "capacity drop at freeway merge/bottleneck",
          "field_b_term": "rarefaction fan transitioning from uncongested to congested branch",
          "note": "Capacity drop corresponds to lower flow on congested branch of fundamental diagram"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rspa.1955.0089",
          "note": "Lighthill & Whitham (1955) Proc R Soc - kinematic waves I: theory of traffic flow"
        },
        {
          "doi": "10.1287/opre.4.1.42",
          "note": "Richards (1956) Oper Res - shock waves on the highway"
        },
        {
          "doi": "10.1007/s11235-020-00700-3",
          "note": "Daganzo (1995) Transportation Research B - Daganzo fundamental diagram and cell transmission model"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/engineering-mathematics/b-traffic-flow-lwr-pde.yaml"
    },
    {
      "id": "b-aeroelastic-flutter-x-hopf-galloping-bifurcation",
      "title": "Classical aeroelastic flutter and galloping — flow-induced limit-cycle oscillations of wings and slender structures — are routinely analyzed with nonlinear dynamical-systems language where onset thresholds correspond to loss of stability of equilibria or periodic orbits, motivating Hopf-/pitchfork-class bifurcation diagrams even though distributed aerodynamics and stall nonlinearities break textbook normal-form universality.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Reduced-order models (strip theory / harmonic aerodynamics with empirical nonlinear lift curves) map velocity or angle-of-attack parameters to Jacobian spectra whose imaginary-axis crossings signal onset of self-excited oscillations; galloping of slender bluff bodies is historically modeled as coupl",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-hopf-reduced-order-predicts-galloping-onset-threshold"
      ],
      "communication_gap": "Aerospace curricula emphasize frequency-domain panel codes (NASTRAN + doublet-lattice / CFD eigenanalysis) while nonlinear dynamics curricula emphasize normal forms on smooth vector fields — practitioners rarely teach unified bifurcation diagrams spanning quasi-steady galloping and fully coupled flutter unless specializing in FSI research journals.\n",
      "translation_table": [
        {
          "field_a_term": "critical flutter speed / onset envelope branch",
          "field_b_term": "bifurcation parameter crossing imaginary-axis eigenvalues",
          "note": "Reduced-order stability diagrams motivate bifurcation language; distributed nonlinear stall breaks strict Hopf normal-form reduction."
        },
        {
          "field_a_term": "galloping (single-DOF limit cycles from aerodynamic negative damping)",
          "field_b_term": "supercritical Hopf scenario in polynomial damping models",
          "note": "Qualitative only — quasi-steady models capture onset thresholds more cleanly than deep stall regimes."
        },
        {
          "field_a_term": "pitch/heave flutter coupling (frequency coalescence)",
          "field_b_term": "coupled-mode instability / resonance-induced instability in mechanics language",
          "note": "Uses avoided-crossing / eigenvalue locus imagery shared with structural dynamics of gyroscopic systems."
        }
      ],
      "references": [
        {
          "doi": "10.1016/0005-1098(78)90036-5",
          "note": "Holmes & Marsden (1978) — bifurcation to divergence and flutter in flow-induced oscillations (Automatica infinite-dimensional analysis)."
        },
        {
          "doi": "10.1093/qjmam/17.2.225",
          "note": "Parkinson (1964) — square prism as an aeroelastic nonlinear oscillator (quartic stiffness galloping model lineage); Q J Mechanics Appl Math."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/engineering-physics/b-aeroelastic-flutter-x-hopf-galloping-bifurcation.yaml"
    },
    {
      "id": "b-antenna-theory-electromagnetic-radiation",
      "title": "All wireless communication reduces to applied Maxwell equations — the Hertzian dipole radiation formula, Friis transmission equation, and phased array beam steering follow from Maxwell's equations with the same mathematics as Bragg diffraction in crystallography.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Hertzian dipole (oscillating electric dipole moment p(t) = p₀cos(ωt)) radiates power P = μ₀ω⁴p₀²/(12πc³) — derived directly from Maxwell's equations via the retarded potential formalism. Radiation resistance R_rad = 80π²(l/λ)² Ω relates physical antenna dimensions to radiated power efficiency. T",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-massive-mimo-channel-capacity-maxwell-limit"
      ],
      "communication_gap": "Antenna theory developed in electrical engineering departments largely separate from physics departments despite being applied electrodynamics. The Bragg diffraction connection to phased arrays is known to physicists but rarely mentioned in antenna engineering textbooks (Balanis, Mailloux). Similarly, condensed matter physicists studying metamaterials and antenna engineers designing phased arrays address nearly identical mathematical problems without cross-citation.\n",
      "translation_table": [
        {
          "field_a_term": "Maxwell's equations (∇×E = −∂B/∂t, ∇×H = J + ∂D/∂t)",
          "field_b_term": "governing equations for all antenna radiation and reception"
        },
        {
          "field_a_term": "retarded potential A(r,t) = μ₀/4π ∫ J(r',t_ret)/|r−r'| dV'",
          "field_b_term": "radiated field from current distribution in antenna element"
        },
        {
          "field_a_term": "radiation resistance R_rad (power dissipated as radiation)",
          "field_b_term": "impedance matching parameter for antenna efficiency"
        },
        {
          "field_a_term": "phased array interference condition (Bragg equivalent)",
          "field_b_term": "beam steering direction in MIMO antenna arrays"
        },
        {
          "field_a_term": "Friis equation (free-space path loss)",
          "field_b_term": "link budget calculation for any wireless communication system"
        },
        {
          "field_a_term": "reciprocity theorem (Lorentz reciprocity)",
          "field_b_term": "transmit and receive antenna patterns are identical"
        },
        {
          "field_a_term": "diffraction limit (θ ≈ λ/D)",
          "field_b_term": "antenna beamwidth inversely proportional to aperture size"
        }
      ],
      "references": [
        {
          "note": "Hertz (1887) Über die Ausbreitung der elektrischen Kraft. Ann Phys 267:421"
        },
        {
          "note": "Balanis (2016) Antenna Theory: Analysis and Design, 4th ed. Wiley"
        },
        {
          "doi": "10.1109/JRPROC.1946.234568",
          "note": "Friis (1946) A note on a simple transmission formula. Proc IRE 34:254"
        },
        {
          "note": "Mailloux (2005) Phased Array Antenna Handbook, 2nd ed. Artech House"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/engineering-physics/b-antenna-theory-electromagnetic-radiation.yaml"
    },
    {
      "id": "b-chaos-control-systems",
      "title": "Nonlinear control systems with time delays or saturation exhibit Lorenz-type chaos and Hopf bifurcations — the strange attractors and Lyapunov exponents of nonlinear dynamics are the precise engineering tools for analysing when PID controllers, power grids, and feedback loops transition from stable operation to chaotic failure.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Lorenz (1963) discovered chaos in a three-variable ODE system modelling atmospheric convection. The same mathematical structure — a nonlinear 3D ODE with a dissipative strange attractor and positive Lyapunov exponent — appears in countless engineering feedback control systems.\nENGINEERING MANIFESTAT",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-hopf-bifurcation-power-grid-stability"
      ],
      "communication_gap": "Lorenz (1963) published in the Journal of Atmospheric Sciences, not in an engineering venue. Control theorists developed stability theory (Lyapunov, Routh- Hurwitz) for linear systems; nonlinear control theory integrated chaos theory gradually through the 1990s (Strogatz's textbook 1994 was influential). Power grid engineers still use linear stability analysis (eigenvalue methods) for most applications; the nonlinear bifurcation analysis tools that would give more accurate stability boundaries require expertise in dynamical systems theory that most power engineers do not have. The gap is maintained by separate publication venues, separate training, and the conservatism of safety-critical engineering.\n",
      "translation_table": [
        {
          "field_a_term": "Lyapunov exponent λ₁",
          "field_b_term": "Chaos measure / generalised nonlinear stability margin",
          "note": "λ₁ > 0: chaotic; λ₁ = 0: marginally stable (Hopf bifurcation boundary); λ₁ < 0: stable attractor"
        },
        {
          "field_a_term": "Strange attractor (fractal dimension)",
          "field_b_term": "Long-run behaviour of a chaotic control system",
          "note": "The set of states a chaotic system visits; fractal dimension D_f measures complexity of dynamics"
        },
        {
          "field_a_term": "Hopf bifurcation (eigenvalues cross imaginary axis)",
          "field_b_term": "Stability boundary of a PID or other feedback controller",
          "note": "At Hopf bifurcation: stable fixed point → stable limit cycle → engineering oscillation (hunting)"
        },
        {
          "field_a_term": "Feigenbaum constant δ ≈ 4.669...",
          "field_b_term": "Universal ratio of successive period-doubling bifurcation intervals",
          "note": "Any unimodal map (logistic, tent) has the same δ; universality class appears in physical experiments"
        },
        {
          "field_a_term": "Unstable periodic orbit (UPO)",
          "field_b_term": "Target trajectory for chaos control (OGY method)",
          "note": "UPOs are dense in the strange attractor; small perturbations can stabilise them for engineering control"
        },
        {
          "field_a_term": "Sensitive dependence on initial conditions",
          "field_b_term": "Weather forecasting horizon / unpredictability limit in engineering",
          "note": "Predictability horizon T_h ~ (1/λ₁) log(δx_final/δx_initial); Lorenz found T_h ~ 2 weeks for atmosphere"
        },
        {
          "field_a_term": "Delay differential equation (DDE)",
          "field_b_term": "Feedback control system with time delay",
          "note": "DDEs are infinite-dimensional; they exhibit Hopf bifurcations and chaos at critical delay values τ_c"
        }
      ],
      "references": [
        {
          "doi": "10.1175/1520-0469(1963)020<0130:DNF>2.0.CO;2",
          "note": "Lorenz (1963) J Atmos Sci 20:130 — deterministic nonperiodic flow; discovery of strange attractor"
        },
        {
          "note": "Strogatz (1994) Nonlinear Dynamics and Chaos (Addison-Wesley) — definitive engineering textbook on chaos",
          "url": "https://www.taylorfrancis.com/books/mono/10.1201/9780429492563"
        },
        {
          "note": "Wiggins (1990) Introduction to Applied Nonlinear Dynamical Systems and Chaos (Springer)",
          "url": "https://link.springer.com/book/10.1007/978-1-4757-4067-7"
        },
        {
          "doi": "10.1103/PhysRevLett.64.1196",
          "note": "Ott, Grebogi & Yorke (1990) Phys Rev Lett 64:1196 — controlling chaos; OGY method"
        },
        {
          "doi": "10.1103/PhysRevLett.64.821",
          "note": "Pecora & Carroll (1990) Phys Rev Lett 64:821 — synchronisation of chaotic systems"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/engineering-physics/b-chaos-control-systems.yaml"
    },
    {
      "id": "b-hertz-contact-x-spherical-indentation",
      "title": "Hertzian elastic contact theory predicts non-overlapping spherical–sphere or sphere–plane contact areas a² ∝ (R F)^{2/3} under purely elastic deformation — guiding nanoindentation and AFM force–distance interpretation — sharing geometric scaling intuition with general contact-mechanics curricula spanning adhesive contacts (JKR/DMT) that perturb pure Hertz scaling when surface energies matter.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Hertz theory solves elasticity boundary-value problems assuming parabolic gap profiles and small strains — producing elliptical contact zones with algebraic load–area relations verified across MEMS, granular matter, and tribology contexts. Nanoindentation extracts elastic modulus from unloading stif",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-hertz-contact-x-spherical-indentation"
      ],
      "communication_gap": "Instrument sales literature cites Hertz formulas without elasticity prerequisites common in physics labs — mechanical engineering courses carry deeper tensor elasticity theory seldom mirrored in AFM training workshops outside materials departments.\n",
      "translation_table": [
        {
          "field_a_term": "Contact radius a (Hertz)",
          "field_b_term": "Effective spring constant k_eff ∝ a from stiffness derivative dF/dh",
          "note": "Nanoindent unloading stiffness recovers reduced modulus when geometry calibrated."
        },
        {
          "field_a_term": "Effective elastic modulus E* combination of sphere/plane materials",
          "field_b_term": "Reduced modulus stack used in AFM colloid probe calibration",
          "note": "Same material combination parameter entering sneddon extensions where applicable."
        },
        {
          "field_a_term": "Peak contact pressure p0 (parabolic distribution)",
          "field_b_term": "Yield inception thresholds compared to hardness tests when plasticity appears",
          "note": "Marks departure regime where Hertz alone insufficient — textbook bridges explicitly caution limits."
        }
      ],
      "references": [
        {
          "doi": "10.1557/JMR.1992.1564",
          "note": "Oliver & Pharr (1992) tradition — nanoindentation unloading stiffness analysis extending elastic contact concepts"
        },
        {
          "doi": "10.1115/1.2833524",
          "note": "Johnson — Contact Mechanics classic lineage overview context (ME tribology cluster)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/engineering-physics/b-hertz-contact-x-spherical-indentation.yaml"
    },
    {
      "id": "b-kelvin-wake-angle-x-ship-wave-dispersion-design",
      "title": "Kelvin wake patterns behind ships translate water-wave dispersion relations into naval-engineering design constraints: the observed wake angle reflects phase/group-velocity geometry, hull speed, finite-depth effects, and non-asymptotic near-field structure.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The bridge connects textbook wave dispersion to practical wake interpretation. It should not be reduced to a universal 19.47 degree angle because modern observations show speed, hull geometry, and finite-depth regimes can narrow or distort the apparent wake.\n",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-dispersion-aware-wake-visualization-improves-hull-wave-interpretation"
      ],
      "communication_gap": "Naval engineers often use empirical resistance and wake models, while physics education emphasizes ideal Kelvin wedges.\n",
      "translation_table": [
        {
          "field_a_term": "gravity-wave dispersion relation",
          "field_b_term": "ship-wave phase and group wake geometry",
          "note": "Dispersion sets the wedge geometry."
        },
        {
          "field_a_term": "Froude number",
          "field_b_term": "hull-speed scaling parameter",
          "note": "Speed changes apparent wake angle."
        },
        {
          "field_a_term": "finite-depth correction",
          "field_b_term": "shallow-water wake spreading or narrowing",
          "note": "Bathymetry changes the transfer."
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.110.214503",
          "note": "Rabaud and Moisy (2013) on narrow ship wakes and Froude-number dependence."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/engineering-physics/b-kelvin-wake-angle-x-ship-wave-dispersion-design.yaml"
    },
    {
      "id": "b-metamaterials-negative-refraction",
      "title": "Metamaterials with simultaneously negative permittivity and permeability achieve negative refractive index — Veselago's 1968 theoretical prediction, Pendry's 2000 perfect-lens proposal, and the NIMS experimental demonstration unify electromagnetic theory, photonics engineering, and transformation optics into a single framework for controlling light beyond natural material limits.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Metamaterials are engineered electromagnetic media with properties absent in any naturally occurring material. Their defining feature is the ability to achieve negative values of both electric permittivity ε and magnetic permeability μ simultaneously, resulting in a negative refractive index n < 0.\n",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-metasurface-flat-lens-diffraction-limited-visible"
      ],
      "communication_gap": "Veselago (1968) published in Soviet Physics Uspekhi during the Cold War — his paper was largely unknown in the West until Pendry's 2000 PRL cited it explicitly. The gap between Veselago's theoretical prediction and experimental confirmation was 32 years, partly due to the journal being in Russian and partly because the engineering community saw no route to implementation. Pendry (2000) published in Physical Review Letters (physics) while the experimental demonstration by Smith et al. (2000) appeared simultaneously — an unusual physics-engineering synchronisation. The transformation optics framework (Pendry et al. 2006) appeared simultaneously with an independent derivation by Leonhardt (2006), both in Science. Electrical engineering antenna communities and photonics communities developed metamaterial research somewhat separately from the condensed-matter physics community.\n",
      "translation_table": [
        {
          "field_a_term": "negative permittivity ε < 0 (plasma frequency)",
          "field_b_term": "thin wire array at sub-plasma frequency (engineering implementation)",
          "note": "metals have ε < 0 below plasma frequency; wire arrays tune effective plasma frequency"
        },
        {
          "field_a_term": "negative permeability μ < 0 (magnetic resonance)",
          "field_b_term": "split-ring resonator (SRR) array at resonant frequency",
          "note": "no natural material has μ < 0 at optical frequencies; SRRs engineer it"
        },
        {
          "field_a_term": "negative refractive index n = -√(εμ) < 0",
          "field_b_term": "backwards-wave medium (phase velocity antiparallel to energy flow)",
          "note": "experimentally verified by Snell's law angle measurements at microwave frequencies"
        },
        {
          "field_a_term": "evanescent wave amplification (growing exponential in n=-1 slab)",
          "field_b_term": "sub-diffraction imaging (Pendry perfect lens)",
          "note": "evanescent waves carry spatial frequencies beyond k=ω/c — sub-diffraction information"
        },
        {
          "field_a_term": "coordinate transformation in Maxwell's equations",
          "field_b_term": "engineered ε(r)/μ(r) tensor profile (transformation optics)",
          "note": "material recipe for any desired electromagnetic functionality derived from geometry"
        },
        {
          "field_a_term": "electromagnetic cloaking (coordinate transformation routes waves around object)",
          "field_b_term": "anisotropic metamaterial shell with spatially varying ε/μ",
          "note": "first demonstrated at microwave; optical cloaking constrained by material losses"
        }
      ],
      "references": [
        {
          "note": "Veselago (1968) Sov Phys Usp 10:509 — theoretical prediction of negative refractive index"
        },
        {
          "doi": "10.1103/PhysRevLett.85.3966",
          "note": "Pendry (2000) Phys Rev Lett 85:3966 — perfect lens via negative refraction"
        },
        {
          "doi": "10.1103/PhysRevLett.84.4184",
          "note": "Smith et al. (2000) Phys Rev Lett 84:4184 — composite medium with simultaneously negative ε and μ (negative refraction itself measured by Shelby et al. 2001)"
        },
        {
          "doi": "10.1126/science.1125907",
          "note": "Pendry et al. (2006) Science 312:1780 — transformation optics and electromagnetic cloaking"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/engineering-physics/b-metamaterials-negative-refraction.yaml"
    },
    {
      "id": "b-metamaterials-negative-refractive-index",
      "title": "Electromagnetic metamaterials with simultaneously negative permittivity (ε < 0) and permeability (μ < 0) produce negative refractive index (n = -√(εμ) < 0), enabling perfect lensing beyond the diffraction limit and electromagnetic cloaking — with direct extensions to acoustic and elastic metamaterials for sound and vibration control.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "VESELAGO'S PREDICTION (1968): Maxwell's equations allow negative refractive index if BOTH ε < 0 AND μ < 0 simultaneously. For a plane wave with wave vector k:\n\n  k = (ω/c) n = (ω/c) √(εμ)\n\nWhen ε < 0 and μ < 0: n = -√(|ε||μ|) < 0. The phase velocity is antiparallel to the energy flow (Poynting vecto",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-acoustic-metamaterial-cloaking-bandwidth-thickness-tradeoff"
      ],
      "communication_gap": "Veselago's original 1968 paper was ignored for 30 years — published in a Soviet physics journal, largely unknown to Western engineers. Pendry's 2000 perfect lens paper triggered an explosive reconnection between electromagnetic theory and materials engineering. Acoustic metamaterials researchers and electromagnetic metamaterial researchers still publish largely in separate journals.\n",
      "translation_table": [
        {
          "field_a_term": "negative permittivity epsilon < 0 (electromagnetism)",
          "field_b_term": "negative compressibility (acoustic metamaterials)",
          "note": "Both describe materials that respond to driving force in opposite direction to natural systems"
        },
        {
          "field_a_term": "negative permeability mu < 0 (electromagnetism)",
          "field_b_term": "negative effective density rho_eff < 0 (acoustics)",
          "note": "Achieved via resonant local resonators; analogy is precise at the level of wave equations"
        },
        {
          "field_a_term": "evanescent wave amplification (negative index lens)",
          "field_b_term": "surface plasmon polariton enhancement",
          "note": "Veselago lens works by surface plasmons on the interfaces amplifying evanescent components"
        },
        {
          "field_a_term": "transformation optics (electromagnetic cloaking)",
          "field_b_term": "coordinate transformation in general relativity",
          "note": "Form invariance of Maxwell's equations under coordinate transformations is the mathematical foundation of both"
        }
      ],
      "references": [
        {
          "note": "Veselago (1968) Sov Phys Usp 10:509 — theoretical prediction of negative index"
        },
        {
          "doi": "10.1103/PhysRevLett.76.4773",
          "note": "Pendry et al. (1996) Phys Rev Lett 76:4773 — thin-wire structures with effective negative epsilon (the split-ring resonator negative-mu design followed in Pendry et al. 1999, IEEE Trans Microw Theory Tech 47:2075)"
        },
        {
          "doi": "10.1103/PhysRevLett.85.3966",
          "note": "Pendry (2000) Phys Rev Lett 85:3966 — perfect lens"
        },
        {
          "doi": "10.1126/science.1058847",
          "note": "Shelby et al. (2001) Science 292:77 — first experimental realisation"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/engineering-physics/b-metamaterials-negative-refractive-index.yaml"
    },
    {
      "id": "b-microelectronics-quantum-confinement",
      "title": "Sub-10 nm transistor scaling forces quantum confinement effects — tunneling leakage, ballistic transport (Landauer formula), and quantum capacitance — into the engineering design space, bridging quantum physics with semiconductor device engineering at the 3nm node and beyond.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Moore's law scaling has brought transistor gate lengths below 10 nm (commercial production: TSMC 3nm node, 2022; Intel 20A/18A, 2024), at which quantum mechanical effects are no longer negligible perturbations but dominate device behavior. Three quantum phenomena define the engineering design challe",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-gaa-nanosheet-ballistic-transport-regime-room-temperature-3nm"
      ],
      "communication_gap": "Quantum transport theory (published in Phys Rev B, Nano Lett, using NEGF/Landauer formalism) is developed in physics and electrical engineering departments with significant overlap at places like MIT, Stanford, and ETH. However, industrial semiconductor engineering (published in IEEE Trans Electron Devices, IEDM proceedings) focuses on empirical ITRS roadmap metrics and TCAD simulation rather than first-principles quantum theory. Academic quantum transport researchers are often unaware of the specific constraints (contact resistance, variability, reliability at high field) that dominate industrial device design. The Landauer formula is standard in mesoscopic physics but not in most electrical engineering curricula.\n",
      "translation_table": [
        {
          "field_a_term": "quantum tunneling probability T = exp(-2d√(2m*φ)/ℏ)",
          "field_b_term": "gate dielectric leakage current engineering limit"
        },
        {
          "field_a_term": "Landauer conductance quantum G₀ = 2e²/h = 77.5 µS",
          "field_b_term": "maximum ON-current per conducting channel in nanoscale transistor"
        },
        {
          "field_a_term": "transmission probability T (Landauer-Büttiker formalism)",
          "field_b_term": "device ON/OFF ratio (transmission near 1 for ON, near 0 for OFF)"
        },
        {
          "field_a_term": "transverse subband quantization (M modes in nanowire/nanosheet)",
          "field_b_term": "parallel conduction channels in GAA nanosheet transistor"
        },
        {
          "field_a_term": "quantum capacitance C_Q = e²·DOS (limits gate control at high density)",
          "field_b_term": "intrinsic speed limit of transistor switching at GHz-THz frequencies"
        },
        {
          "field_a_term": "high-κ dielectric (HfO₂, κ = 25)",
          "field_b_term": "engineering solution to tunnel leakage — thicker physical layer, same EOT"
        },
        {
          "field_a_term": "2D material monolayer channel (MoS₂, 0.65 nm thick)",
          "field_b_term": "ultimate short-channel immunity (channel thickness = 1 atomic layer)"
        }
      ],
      "references": [
        {
          "doi": "10.1109/16.887014",
          "note": "Hisamoto et al. (2000) IEEE Trans Electron Devices 47:2320 — FinFET: a self-aligned double-gate MOSFET scalable to 20 nm"
        },
        {
          "doi": "10.1147/rd.13.0223",
          "note": "Landauer (1957) IBM J Res Dev 1:223 — spatial variation of currents and fields due to localized scatterers in metallic conduction"
        },
        {
          "doi": "10.1126/science.1102896",
          "note": "Novoselov et al. (2004) Science 306:666 — electric field effect in atomically thin carbon films (graphene FET)"
        },
        {
          "note": "Sze & Ng (2007) Physics of Semiconductor Devices (3rd ed.) — Wiley (standard reference)"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/engineering-physics/b-microelectronics-quantum-confinement.yaml"
    },
    {
      "id": "b-openalex-topology-electrical-circuits-x-condensed-matter-physics",
      "title": "Topoelectrical circuits realize condensed-matter topological band invariants in controllable RLC networks, where impedance boundary modes map to edge states protected by circuit-symmetry class",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Electrical circuit Laplacians can be designed to emulate tight-binding Hamiltonians from topological condensed matter. In this mapping, the circuit admittance matrix Y(omega) plays the role of an effective Bloch Hamiltonian H(k), and resonant impedance peaks at boundaries correspond to topological e",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-topoelectrical-circuit-edge-mode-disorder-threshold"
      ],
      "communication_gap": "Circuit designers often frame results in impedance/network language, while condensed-matter groups frame the same structure in Hamiltonian, symmetry class, and topological invariant language. The mathematics is close, but publication venues and notation conventions keep the communities partially siloed.\n",
      "translation_table": [
        {
          "field_a_term": "tight-binding Bloch Hamiltonian H(k)",
          "field_b_term": "circuit Laplacian / admittance matrix Y(k, omega)",
          "note": "Both encode band structure and symmetry-constrained mode spectra under periodic geometry assumptions"
        },
        {
          "field_a_term": "topological edge state",
          "field_b_term": "high-impedance boundary resonance mode",
          "note": "Protected boundary-localized modes appear as robust impedance signatures at circuit edges/corners"
        },
        {
          "field_a_term": "bulk-boundary correspondence",
          "field_b_term": "bulk network topology predicts boundary impedance peaks",
          "note": "Nontrivial bulk phase indicators imply measurable localized boundary response"
        }
      ],
      "references": [
        {
          "arxiv": "1710.03223",
          "note": "Lee et al. (2017) Topological correspondence between quantum and electric circuit networks."
        },
        {
          "doi": "10.1103/RevModPhys.82.3045",
          "note": "Hasan and Kane (2010) Colloquium on topological insulators."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/engineering-physics/b-openalex-topology-electrical-circuits-x-condensed-matter-physics.yaml"
    },
    {
      "id": "b-optical-fiber-nonlinear-optics",
      "title": "Optical fiber communications bridge engineering and physics: single-mode fiber waveguide physics, group velocity dispersion, erbium-doped fiber amplifiers, and Kerr nonlinearity (SPM/XPM/FWM) enable 8 Tbps per fiber across intercontinental distances, with solitons as the nonlinear-dispersive balance solution.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Optical fiber communication systems require understanding physics across multiple scales and nonlinear regimes. Single-mode fiber (SMF-28): total internal reflection (core n₁=1.4682, cladding n₂=1.4629, NA=0.13); electromagnetic waveguide mode theory; only the LP₀₁ mode propagates above cutoff wavel",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-soliton-basis-transmission-optimal-nonlinear-channel-capacity"
      ],
      "communication_gap": "Telecommunications engineers who design WDM systems focus on system margins, modulation formats (DP-QPSK, QAM), and DSP; nonlinear fiber optics physicists focus on the mathematics of solitons and integrable systems. The engineering literature rarely engages with the deep physics of soliton dynamics; the physics literature rarely considers the practical constraints of real transmission systems.\n",
      "translation_table": [
        {
          "field_a_term": "SMF-28 single-mode optical fiber",
          "field_b_term": "cylindrical dielectric waveguide; TIR confines LP₀₁ mode",
          "note": "electromagnetic boundary conditions determine cutoff; physics of waveguide modes"
        },
        {
          "field_a_term": "group velocity dispersion D = 17 ps/(nm·km)",
          "field_b_term": "d²ω/dk² ≠ 0; material and waveguide dispersion contributions",
          "note": "pulse broadening limits bandwidth; dispersion-shifted fiber moves zero-dispersion to 1550 nm"
        },
        {
          "field_a_term": "EDFA (C-band optical amplifier)",
          "field_b_term": "quasi-three-level laser gain medium (Er³⁺ in silica glass at 1530-1565 nm)",
          "note": "population inversion from 980nm or 1480nm pump → stimulated emission at signal wavelength"
        },
        {
          "field_a_term": "Kerr effect (n₂ intensity-dependent index)",
          "field_b_term": "χ⁽³⁾ nonlinear susceptibility of silica → SPM, XPM, FWM",
          "note": "nonlinearity is weak (n₂ small) but large propagation distances make it significant"
        },
        {
          "field_a_term": "soliton (NLS equation solution)",
          "field_b_term": "SPM (anomalous GVD) balances dispersion → pulse shape-preserving propagation",
          "note": "N=1 fundamental soliton: P₀ = |β₂|/(γT₀²); intercontinental propagation without regeneration"
        }
      ],
      "references": [
        {
          "note": "Agrawal (2019) Nonlinear Fiber Optics, 6th ed.; Academic Press"
        },
        {
          "note": "Kao & Hockham (1966) Dielectric-fibre surface waveguides for optical frequencies; Proc IEE 113:1151"
        },
        {
          "note": "Mears et al. (1987) Low-noise erbium-doped fibre amplifier operating at 1.54 μm; Electron Lett 23:1026"
        },
        {
          "doi": "10.1103/PhysRevLett.45.1095",
          "note": "Mollenauer et al. (1980) Experimental observation of picosecond pulse narrowing and solitons in optical fibers; Phys Rev Lett 45:1095"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/engineering-physics/b-optical-fiber-nonlinear-optics.yaml"
    },
    {
      "id": "b-phased-array-beamforming-x-multi-coil-wireless-power-interference-lobes",
      "title": "Arrays of driven coils or phased RF transmitters steer magnetic or propagating fields via controlled phases — array factor mathematics producing main beams and grating lobes parallels phased-array antenna theory applied to multi-coil wireless power routing (antenna arrays ↔ resonant power transfer).\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Superposition of currents I_k e^{jφ_k} on identical coils spaced distance d creates interference patterns analogous to antenna arrays: peak constructive steering occurs when phase progression matches spatial phase delay. Undersampled spacing produces unwanted lobes — grating lobes in radar vocabular",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-half-wavelength-coil-spacing-bound-suppresses-near-field-grating-analogs"
      ],
      "communication_gap": "Wireless charging roadmaps emphasize efficiency maps versus gap; antenna-array textbooks emphasize radiation patterns — practitioners rarely share CAD workflows despite mathematical similarity.\n",
      "translation_table": [
        {
          "field_a_term": "array factor F(θ) = Σ a_k e^{jk·r_k}",
          "field_b_term": "spatial profile of magnetic vector potential from phased coil currents",
          "note": "Near-field dominance modifies far-field pattern formulas but interference principle persists."
        },
        {
          "field_a_term": "grating lobe condition for element spacing > λ/2 (propagating regimes)",
          "field_b_term": "unintended coupling to neighboring vehicles/pads in tile arrays",
          "note": "Near-field effective wavelengths differ from free-space λ — engineering uses full-wave correction."
        },
        {
          "field_a_term": "beam steering weights (Taylor/Chebyshev)",
          "field_b_term": "coil current amplitude/phase schedules minimizing peak stray fields subject to efficiency constraints",
          "note": "Optimization parallels array synthesis targets sidelobe suppression."
        }
      ],
      "references": [
        {
          "doi": "10.1109/TPEL.2013.2279268",
          "note": "Nam et al. (2013) — coupled magnetic resonance systems with multiple resonators / positioning context (IEEE Trans. Power Electron.)."
        },
        {
          "doi": "10.1109/TAP.1964.1138256",
          "note": "Taylor & Ku (1964) — classical aperture synthesis / array factor lineage (IEEE Trans. Antennas Propag.)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/engineering-physics/b-phased-array-beamforming-x-multi-coil-wireless-power-interference-lobes.yaml"
    },
    {
      "id": "b-power-grid-stability-kuramoto-synchronization",
      "title": "Power grid stability maps mathematically onto the Kuramoto model of coupled oscillators from physics: generators are phase oscillators coupled by transmission lines, and synchrony corresponds to the grid-locked state; the critical coupling strength for synchronization determines the grid's stability margin against cascading failures.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The swing equation for a synchronous generator: M·d²δᵢ/dt² + D·dδᵢ/dt = Pᵢ - ∑_j K_ij·sin(δᵢ - δⱼ) is structurally identical to the Kuramoto model dθᵢ/dt = ωᵢ + ∑_j K_ij·sin(θⱼ - θᵢ) for phase oscillators. In both models, synchrony (all dδᵢ/dt → constant) occurs when coupling K > K_c where K_c = 2/(",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-distributed-renewable-generators-kuramoto-stability-different-grid-topology"
      ],
      "communication_gap": "Power systems engineers use the swing equation for grid stability analysis while physicists study the Kuramoto model for synchronization; the mathematical equivalence was recognized in the 2000s and has driven fruitful cross-disciplinary work on complex network synchronization, but many grid stability textbooks do not present the Kuramoto framework, leaving the connection underexploited in engineering education.\n",
      "translation_table": [
        {
          "field_a_term": "generator rotor angle δᵢ (electrical engineering)",
          "field_b_term": "Kuramoto phase oscillator θᵢ (physics)",
          "note": "Both represent the phase of a periodic oscillator; swing equation = second-order Kuramoto model"
        },
        {
          "field_a_term": "transmission line admittance Y_ij (electrical engineering)",
          "field_b_term": "coupling constant K_ij (physics)",
          "note": "Line susceptance determines coupling strength; loss of a line reduces K locally"
        },
        {
          "field_a_term": "grid synchrony at 60/50 Hz (electrical engineering)",
          "field_b_term": "synchronized state of Kuramoto model (r ≈ 1) (physics)",
          "note": "All generators lock to the same frequency; Kuramoto order parameter r measures synchrony"
        },
        {
          "field_a_term": "cascading blackout / frequency collapse (electrical engineering)",
          "field_b_term": "desynchronization transition in Kuramoto model (physics)",
          "note": "Blackouts are the grid-scale manifestation of the Kuramoto phase transition losing coherence"
        }
      ],
      "references": [
        {
          "doi": "10.1063/1.4745197",
          "note": "Dörfler & Bullo (2012) - synchronization and transient stability in power networks (Kuramoto-grid bridge)"
        },
        {
          "doi": "10.1038/nphys2535",
          "note": "Motter et al. (2013) - spontaneous synchrony in power-grid networks"
        },
        {
          "doi": "10.1103/RevModPhys.77.137",
          "note": "Acebrón et al. (2005) - Kuramoto model review"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/engineering-physics/b-power-grid-stability-kuramoto-synchronization.yaml"
    },
    {
      "id": "b-resonant-wireless-power-transfer-x-coupled-mode-q-bandwidth-limit",
      "title": "Coupled-mode quality-factor limits in resonant wireless power transfer map directly to the RF bandwidth-efficiency tradeoff in practical charger architectures.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Resonant inductive links are governed by coupled-mode dynamics where transfer efficiency depends on coupling coefficient k and resonator quality factors (Q_tx, Q_rx). Pushing Q upward improves peak efficiency but narrows usable bandwidth and increases detuning sensitivity, matching the engineering o",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-critical-coupling-tracking-improves-mid-range-wireless-power-efficiency"
      ],
      "communication_gap": "Physics treatments often emphasize ideal coupled resonators, while engineering reports focus on hardware prototyping metrics; the k-Q-bandwidth constraint is the shared core language.\n",
      "translation_table": [
        {
          "field_a_term": "coupled-mode critical coupling (k, Q, detuning)",
          "field_b_term": "impedance-tracking and autotuning loops in WPT chargers",
          "note": "Peak efficiency occurs near matched loss and coupling, not necessarily at maximal unloaded Q."
        },
        {
          "field_a_term": "modal linewidth and bandwidth",
          "field_b_term": "load-transient tolerance and coil-misalignment robustness",
          "note": "Narrow resonances require faster adaptation and tighter mechanical tolerances."
        },
        {
          "field_a_term": "coupling-distance scaling",
          "field_b_term": "efficiency-range operating envelope",
          "note": "Mid-range operation is primarily limited by k collapse and retuning latency."
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1143254",
          "note": "Kurs et al. (2007), strongly coupled magnetic resonances for mid-range wireless power transfer."
        },
        {
          "doi": "10.1109/JPROC.2013.2244531",
          "note": "Comprehensive review of wireless power transfer methods and tradeoffs."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/engineering-physics/b-resonant-wireless-power-transfer-x-coupled-mode-q-bandwidth-limit.yaml"
    },
    {
      "id": "b-skin-friction-x-boundary-layer",
      "title": "Skin friction in wall-bounded turbulence links engineering drag measurements to boundary-layer scaling laws such as the logarithmic law of the wall and roughness-modified shifts.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The mean velocity profile near a wall exhibits a logarithmic region in turbulent flow; local wall shear stress (skin friction) sets the friction velocity u_τ and anchors the profile. Engineering correlations (Cf vs Re) inherit these inner-layer scalings. Surface roughness shifts the log law downward",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-law-of-wall-predicts-local-skin-friction-when-roughness-scaled"
      ],
      "communication_gap": "Aerospace reports integrated drag; turbulence theorists emphasize asymptotics. Roughness geometry from manufacturing is rarely fed into first-principles wall models.",
      "translation_table": [
        {
          "field_a_term": "friction velocity u_τ",
          "field_b_term": "√(τ_w/ρ) anchoring near-wall profiles"
        },
        {
          "field_a_term": "Cf = 2τ_w/(ρU^2)",
          "field_b_term": "engineering skin-friction coefficient"
        },
        {
          "field_a_term": "roughness Reynolds number k+",
          "field_b_term": "hydrodynamically smooth vs rough regimes"
        }
      ],
      "references": [
        {
          "doi": "10.1146/annurev.fluid.36.050802.122103",
          "note": "Jiménez (2004) — turbulent flows over rough walls (canonical scaling bridge)"
        },
        {
          "doi": "10.1017/jfm.2016.543",
          "note": "Representative modern rough-wall turbulence diagnostics (illustrative follow-on)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/engineering-physics/b-skin-friction-x-boundary-layer.yaml"
    },
    {
      "id": "b-soft-ferrite-hysteresis-eddy-current-x-wpt-coil-core-losses",
      "title": "Soft ferrite cores reduce reluctance and concentrate flux in wireless power coils but introduce hysteresis and eddy-current losses that lower effective quality factor — magnetic domain physics ↔ resonant link efficiency budgets.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Gapped MnZn/NiZn ferrites below saturation exhibit hysteretic B–H loops whose cycle dissipation adds equivalent series resistance to resonant windings; laminated or powdered cores suppress eddy currents at MHz but cannot eliminate domain-wall losses. In coupled-mode WPT models this manifests as incr",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-gapped-ferrite-bias-point-maximizes-wpt-q-under-saturation-margin"
      ],
      "communication_gap": "Ferrite vendors publish core loss curves at sine excitation; WPT systems carry PWM harmonics and spatial bias fields — standardized translation to equivalent Q is inconsistent across suppliers.\n",
      "translation_table": [
        {
          "field_a_term": "hysteresis loss per cycle ∮ H·dB",
          "field_b_term": "equivalent series resistance increment ΔR_hys in coil model",
          "note": "Frequency and flux swing dependent; nonlinear harmonics complicate linear Q definitions."
        },
        {
          "field_a_term": "eddy-current screening in bulk ferrite",
          "field_b_term": "frequency rolloff of effective permeability μ(f)",
          "note": "Powder cores trade permeability for higher usable frequency."
        },
        {
          "field_a_term": "saturation flux density B_sat",
          "field_b_term": "peak current before nonlinear permeability collapses Q and detunes resonance",
          "note": "Thermal drift moves operating point."
        }
      ],
      "references": [
        {
          "doi": "10.1109/TPEL.2014.2336816",
          "note": "Hui et al. (2014) — wireless charging platform design including magnetic materials context (IEEE Trans. Power Electron.)."
        },
        {
          "doi": "10.1109/TMAG.2007.912763",
          "note": "Reisinger et al. (2007) — ferrite materials survey relevant to MHz magnetics (IEEE Trans. Magn.)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/engineering-physics/b-soft-ferrite-hysteresis-eddy-current-x-wpt-coil-core-losses.yaml"
    },
    {
      "id": "b-soft-robotics-hyperelastic-continuum",
      "title": "Soft robotic actuators made from elastomeric materials are modeled as nonlinear hyperelastic continua using stored-energy functions (neo-Hookean, Mooney-Rivlin), enabling predictive finite-element simulation of large-deformation actuation and inverse design of pneumatic artificial muscles\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Soft robots deform through large elastic strains (>100%) that violate small-strain linear elasticity assumptions; hyperelastic continuum mechanics with stored-energy functions W(F) (e.g., W_neo-Hookean = C1(I1-3) for incompressible rubber) accurately models their stress-strain behavior and enables F",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-neo-hookean-model-predicts-soft-actuator-90pct"
      ],
      "communication_gap": "Robotics engineers design soft actuators empirically using trial-and-error fabrication while continuum mechanicists develop constitutive models for rubber and gels; systematic application of hyperelastic FEM as the primary design tool for soft robots is not yet standard practice in the robotics community.\n",
      "translation_table": [
        {
          "field_a_term": "soft actuator bending angle vs. pressure curve (engineering)",
          "field_b_term": "deformation-load relationship from hyperelastic FEM simulation (mechanics)",
          "note": "Actuator bending is predicted by integrating the hyperelastic constitutive law under applied pneumatic pressure boundary conditions"
        },
        {
          "field_a_term": "elastomeric body material (engineering)",
          "field_b_term": "incompressible hyperelastic material with neo-Hookean stored energy W = C1(I1-3) (mechanics)",
          "note": "Silicone (Ecoflex, Dragon Skin) is well-described by neo-Hookean or Mooney-Rivlin models up to ~400% strain"
        },
        {
          "field_a_term": "fiber reinforcement direction in soft actuator (engineering)",
          "field_b_term": "anisotropic fiber stress in hyperelastic composite model (mechanics)",
          "note": "Fiber angle determines actuation mode: helical fibers produce extension, circumferential fibers produce radial inflation"
        },
        {
          "field_a_term": "grasping task specification (engineering)",
          "field_b_term": "inverse kinetostatics problem in nonlinear continuum mechanics (mechanics)",
          "note": "Designing a gripper shape requires solving the inverse problem: what geometry produces the desired contact force?"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature14543",
          "note": "Rus & Tolley (2015) - design, fabrication and control of soft robots (Nature 521:467)"
        },
        {
          "doi": "10.1002/adma.201702673",
          "note": "Mao et al. (2017) - soft electromagnetic actuators"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/engineering-physics/b-soft-robotics-hyperelastic-continuum.yaml"
    },
    {
      "id": "b-tesla-resonant-wireless-power",
      "title": "Resonant inductive coupling between two LC circuits at the same frequency — first demonstrated by Tesla (1891–1900) and formalised by coupled-mode theory — underlies modern wireless power transfer: from Qi charging in 2 billion devices to medical implants and electric vehicle charging.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Two LC circuits tuned to the same resonant frequency ω₀ = 1/√(LC) exchange energy efficiently via mutual inductance M, even without a direct electrical connection. The coupled-mode theory (CMT) description:\n\n  ȧ₁ = (jω₀ - κ₁)a₁ + jka₂ + F\n  ȧ₂ = (jω₀ - κ₂)a₂ + jka₁\n\nwhere aᵢ are mode amplitudes, κᵢ ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-resonant-wpt-ev-charging-grid-integration"
      ],
      "communication_gap": "Electrical engineers designing Qi chargers rarely cite Tesla's 1899 Colorado Springs experiments; physicists studying coupled-mode theory rarely discuss consumer power electronics. The MIT Witricity 2007 paper bridged the two but the historical connection to Tesla's work remains under-acknowledged. Conversely, popular accounts of Tesla often describe his wireless power visions without the rigorous coupled-mode theory that explains why resonant coupling is efficient.\n",
      "translation_table": [
        {
          "field_a_term": "resonant frequency ω₀ = 1/√(LC)",
          "field_b_term": "operating frequency of Qi / WPT system (100–200 kHz)",
          "note": "Both transmitter and receiver must be tuned to the same resonant frequency for efficient coupling"
        },
        {
          "field_a_term": "quality factor Q = ω₀L/R",
          "field_b_term": "coil efficiency figure of merit",
          "note": "Higher Q means less energy lost per oscillation cycle; modern coils achieve Q = 100–500"
        },
        {
          "field_a_term": "coupling coefficient k = M/√(L₁L₂)",
          "field_b_term": "spatial alignment sensitivity of wireless charger",
          "note": "k decreases rapidly with distance; Qi pads are designed for k > 0.2"
        },
        {
          "field_a_term": "figure of merit kQ",
          "field_b_term": "overall WPT system efficiency parameter",
          "note": "η ≈ (kQ)² / (1 + (kQ)²); engineering goal is to maximise kQ"
        },
        {
          "field_a_term": "mutual inductance M",
          "field_b_term": "coil geometry / coupling factor",
          "note": "M depends on coil geometry, separation, and alignment; designed to be maximised at target distance"
        }
      ],
      "references": [
        {
          "note": "Tesla, N. (1900). The Problem of Increasing Human Energy. Century Magazine. June 1900. -- Tesla's description of resonant wireless power transmission experiments at Colorado Springs"
        },
        {
          "doi": "10.1126/science.1143254",
          "note": "Kurs et al. (2007). Wireless power transfer via strongly coupled magnetic resonances. Science 317:83–86."
        },
        {
          "doi": "10.1109/TIE.2010.2046002",
          "note": "Sample et al. (2011). Analysis, experimental results, and range adaptation of magnetically coupled resonators for wireless power transfer. IEEE Trans. Ind. Electron. 58:544."
        },
        {
          "doi": "10.1109/TPEL.2015.2453127",
          "note": "Zhong, W.X. & Hui, S.Y.R. (2016). Maximum energy efficiency tracking for wireless power transfer systems. IEEE Trans. Power Electron. 31:5654."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/engineering-physics/b-tesla-resonant-wireless-power.yaml"
    },
    {
      "id": "b-thermal-management-heat-transfer",
      "title": "Thermal management engineering deploys Fourier conduction, Newton convection, and Stefan-Boltzmann radiation — the three modes of heat transfer physics — augmented by heat pipes and phase-change materials to solve the semiconductor power density crisis.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Three fundamental physics laws govern all thermal management: (1) Fourier conduction Q = -kA∇T (k = thermal conductivity, W/m·K — copper 385, diamond 2200, air 0.026); (2) Newton convection Q = hA(T_s - T_∞) where h is the heat transfer coefficient (natural convection 2–25, forced liquid 100–20,000 ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-pcm-microencapsulation-enables-chiplet-thermal-buffering"
      ],
      "communication_gap": "Physics courses teach Fourier's law and Stefan-Boltzmann as fundamental principles; mechanical engineering courses teach them as design tools. The connection is acknowledged but the two communities publish in Physical Review B (phonon transport) and ASME Journal of Heat Transfer (engineering correlations) with limited cross-citation. Data center thermal engineering (a $10B/year problem) increasingly requires physics expertise in phonon transport, two-phase flow, and materials science that engineering curricula do not cover adequately.\n",
      "translation_table": [
        {
          "field_a_term": "thermal conductivity k (Fourier)",
          "field_b_term": "thermal resistance R_th = L/(kA) per layer in chip package",
          "note": "Engineers sum resistances in series/parallel; physics gives the k value for each material"
        },
        {
          "field_a_term": "Nusselt number Nu = hL/k (dimensionless convection)",
          "field_b_term": "heat transfer coefficient h for convective cooling specification",
          "note": "Nu from fluid mechanics correlations (Dittus-Boelter, Churchill-Bernstein) gives engineering h"
        },
        {
          "field_a_term": "phase transition latent heat ΔH_fus",
          "field_b_term": "thermal energy storage capacity of PCM (J/g)",
          "note": "PCMs store 5–14× more energy per mass than sensible heat at the same ΔT"
        },
        {
          "field_a_term": "capillary pressure Δp_cap = 2σcosθ/r (Young-Laplace)",
          "field_b_term": "maximum heat pipe wick pumping pressure — sets heat transport limit",
          "note": "Physics of capillarity determines the heat pipe performance envelope"
        },
        {
          "field_a_term": "Fin efficiency η_f = tanh(mL)/mL",
          "field_b_term": "real vs. ideal fin heat dissipation — design parameter for heatsinks",
          "note": "Eigenfunction of the 1D heat equation in a fin; engineering design charts approximate it"
        }
      ],
      "references": [
        {
          "note": "Incropera et al. (2007) Fundamentals of Heat and Mass Transfer, 6th ed., Wiley"
        },
        {
          "note": "Faghri (1995) Heat Pipe Science and Technology, Taylor & Francis"
        },
        {
          "doi": "10.1115/1.1616917",
          "note": "Bar-Cohen et al. (2003) — Direct liquid cooling of high flux micro and nano electronic components, ASME J Electron Packag 125:229"
        },
        {
          "doi": "10.1016/j.rser.2009.01.003",
          "note": "Sharma et al. (2009) — Review on thermal energy storage with phase change materials, Renew Sustain Energy Rev 13:2438"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/engineering-physics/b-thermal-management-heat-transfer.yaml"
    },
    {
      "id": "b-wpt-resonator-q-bandwidth-tradeoff-x-matching-network-coexistence",
      "title": "High-Q resonators sharpen bandwidth in magnetically coupled wireless power links — coupling bandwidth and impedance matching constraints jointly bound multi-frequency coexistence of resonant WPT channels (RF resonator theory ↔ power electronics).\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Resonant inductive WPT treats coils as coupled LC resonators with loaded quality factor Q = ωL/R and fractional bandwidth Δω/ω ~ 1/Q for simple pole pairs. Narrowband matching maximizes link efficiency at a carrier but suppresses adjacent-band coexistence: tighter coils or higher Q shrink the coupli",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-wpt-coexistence-requires-q-bandwidth-renegotiation-per-standard"
      ],
      "communication_gap": "Power-electronics WPT papers emphasize η vs distance and κ/Γ figures of merit; RF filter/matching literature quantifies insertion loss versus fractional bandwidth with explicit Bode bounds — cross-citation is sporadic.\n",
      "translation_table": [
        {
          "field_a_term": "loaded quality factor Q = ω₀L/R",
          "field_b_term": "coupling bandwidth / spectral occupancy of the resonant link",
          "note": "Higher Q improves circulating currents but narrows the frequency window over which matching holds."
        },
        {
          "field_a_term": "resonator pole sharpness ∝ 1/Q",
          "field_b_term": "tolerance of multi-standard chargers (Qi / proprietary carriers) on shared hardware",
          "note": "Multi-frequency operation requires either lower Q, staggered tuning, or active cancellation."
        },
        {
          "field_a_term": "coupled-mode steady-state efficiency η(k, Q₁, Q₂)",
          "field_b_term": "peak efficiency only within the bandwidth where resonator approximation is valid",
          "note": "Efficiency formulas assume sinusoidal steady state inside the pole bandwidth."
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1143254",
          "note": "Kurs et al. (2007) — strongly coupled magnetic resonances (Science)."
        },
        {
          "doi": "10.1109/TIE.2010.2046002",
          "note": "Sample et al. (2011) — coupled resonators for WPT; bandwidth and range adaptation (IEEE Trans. Ind. Electron.)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/engineering-physics/b-wpt-resonator-q-bandwidth-tradeoff-x-matching-network-coexistence.yaml"
    },
    {
      "id": "b-cybersecurity-adversarial-systems",
      "title": "Cybersecurity is an adversarial engineering-social science system: attacks exploit human and technical vulnerabilities simultaneously, defense-in-depth mirrors Stackelberg game equilibria, and the economics of cybercrime ($8T annually) make it larger than most national economies.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Cybersecurity bridges engineering (technical attack/defense mechanisms) and social science (human behavior, economics, game theory). The CIA triad (Confidentiality, Integrity, Availability) provides the engineering framework; game theory provides the social science framework. Attack vectors span bot",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-stackelberg-equilibrium-predicts-security-market-underinvestment"
      ],
      "communication_gap": "Security engineers focus on technical controls and are trained in cryptography and systems; economists and game theorists focus on incentive structures and behavioral models. The security economics literature (Anderson, Moore, Böhme) bridges both but is not standard in either CS security curricula or economics graduate programs. Practitioners in each field often rediscover insights well-established in the other.\n",
      "translation_table": [
        {
          "field_a_term": "defense in depth (layered security controls)",
          "field_b_term": "mixed Nash equilibrium / redundancy in adversarial games",
          "note": "no dominant strategy; multiple independent defenses raise attacker cost"
        },
        {
          "field_a_term": "zero-day vulnerability market pricing",
          "field_b_term": "shadow market equilibrium; price = expected attacker surplus",
          "note": "Zerodium prices provide revealed-preference evidence of attack difficulty"
        },
        {
          "field_a_term": "SQL injection / buffer overflow (technical exploits)",
          "field_b_term": "phishing / social engineering (human cognitive exploits)",
          "note": "both are attack surface; security requires engineering + behavioral defense"
        },
        {
          "field_a_term": "attacker's cost-benefit calculation",
          "field_b_term": "rational criminal behavior model (Becker 1968)",
          "note": "cybercrime supply responds to enforcement probability and penalty magnitude"
        },
        {
          "field_a_term": "security metrics (CVE score, breach cost)",
          "field_b_term": "economic externalities; firms underinvest due to uncompensated breach costs",
          "note": "negative externalities of data breaches are not fully borne by the breached firm"
        }
      ],
      "references": [
        {
          "note": "Anderson (2001) Security Engineering; Wiley"
        },
        {
          "doi": "10.1145/1435417.1435418",
          "note": "Moore et al. (2009) The economics of online crime; Commun ACM 52:78"
        },
        {
          "note": "Laszka et al. (2014) A survey of interdependent information security games; GameSec 2014"
        },
        {
          "note": "Morgan (2023) 2023 Official Cybercrime Report; Cybersecurity Ventures"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/engineering-social-science/b-cybersecurity-adversarial-systems.yaml"
    },
    {
      "id": "b-infrastructure-cascade-failures",
      "title": "Buldyrev's interdependent network model predicts catastrophic discontinuous phase transitions in coupled infrastructure systems (power-grid/internet) — unlike single networks which fail gradually — proven by the 2003 Northeast Blackout (265 plants, 55M people) and formalised as NP-hard minimum-cost resilience recovery.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Single-network percolation theory: a random graph with mean degree ⟨k⟩ has a giant connected component above a critical fraction p_c of remaining nodes — removal of (1−p_c) nodes causes gradual degradation. The 2003 Northeast Blackout demonstrated that real infrastructure is not a single network: a ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-infrastructure-interdependence-discontinuous-collapse-empirical"
      ],
      "communication_gap": "Network science (physics) and infrastructure engineering have separate literatures (Phys Rev Lett vs. IEEE Trans Power Systems), separate conferences, and separate funding agencies (NSF Physics vs. DOE/DHS). After the 2003 blackout, the NERC investigation was conducted by power engineers who did not cite percolation theory; the Buldyrev paper appeared in Nature (2010) but was not immediately incorporated into NERC reliability standards or FEMA resilience frameworks. The social science dimension (who bears the cost of failures) is studied by sociologists and public policy researchers who rarely engage with the network topology literature.\n",
      "translation_table": [
        {
          "field_a_term": "giant connected component in random graph (percolation theory)",
          "field_b_term": "fraction of population with access to power / internet services"
        },
        {
          "field_a_term": "first-order phase transition in interdependent network percolation",
          "field_b_term": "catastrophic infrastructure collapse (abrupt, non-gradual failure)"
        },
        {
          "field_a_term": "critical fraction p_c (percolation threshold)",
          "field_b_term": "minimum system redundancy required to prevent cascade collapse"
        },
        {
          "field_a_term": "interdependency links between network A and B",
          "field_b_term": "SCADA control dependency of power grid on internet"
        },
        {
          "field_a_term": "resilience integral R = ∫Q(t)dt",
          "field_b_term": "total service disruption (person-hours without power/water)"
        },
        {
          "field_a_term": "Steiner tree minimum spanning recovery (NP-hard)",
          "field_b_term": "optimal infrastructure repair sequencing under budget constraint"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature08932",
          "note": "Buldyrev et al. (2010) Nature 464:1025 — catastrophic cascade failures in interdependent networks"
        },
        {
          "note": "Pourbeik et al. (2006) IEEE Power Energy Mag 4:22 — the anatomy of a power grid blackout (2003)"
        },
        {
          "doi": "10.1193/1.1623497",
          "note": "Bruneau et al. (2003) Earthquake Spectra 19:733 — framework to quantitatively assess seismic resilience"
        },
        {
          "doi": "10.1038/nphys2180",
          "note": "Gao et al. (2011) Nat Phys 8:40 — robustness of a network of networks"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/engineering-social-science/b-infrastructure-cascade-failures.yaml"
    },
    {
      "id": "b-operations-research-market-design",
      "title": "Operations research (linear programming, matching algorithms) provides the computational backbone of modern market design — the Gale-Shapley deferred acceptance algorithm achieves stable matching in O(n²), kidney exchange is maximum-weight matching on compatibility graphs, and spectrum auctions are NP-hard combinatorial optimization problems in practice.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Operations research (OR) develops algorithms for resource allocation under constraints. Market design applies these algorithms to real economic markets — transforming abstract optimization theory into institutions that match people to organs, students to schools, and spectrum licenses to telecoms.\nL",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-gale-shapley-deferred-acceptance-stability-uniqueness"
      ],
      "communication_gap": "OR was developed as an engineering discipline (Dantzig, RAND Corporation, 1940s-60s) with applications to logistics and military operations. Market design emerged in economics (Roth, Wilson) in the 1980s-90s largely independently. The explicit connection — that market design is applied OR — was made by practitioners (Roth brought in OR specialists for kidney exchange algorithms) but is not standard in economics PhD curricula. Operations Research journals (Management Science, Operations Research) and economics journals (American Economic Review) have low cross-citation rates despite shared mathematical foundations.\n",
      "translation_table": [
        {
          "field_a_term": "stable matching (Gale-Shapley)",
          "field_b_term": "maximum weight matching with stability constraints on bipartite graph",
          "note": "Stability = Nash equilibrium of decentralized matching game"
        },
        {
          "field_a_term": "deferred acceptance (DA) algorithm",
          "field_b_term": "iterative auction where highest bidder holds tentative allocation",
          "note": "DA is strategy-proof for the proposing side — truthful preference revelation is optimal"
        },
        {
          "field_a_term": "kidney exchange compatibility graph",
          "field_b_term": "maximum weight cycle cover problem (NP-hard for k>2 cycles)",
          "note": "Practical solution: integer programming with cycle/chain length constraints"
        },
        {
          "field_a_term": "LP dual variables (shadow prices)",
          "field_b_term": "equilibrium prices in competitive market (Walrasian equilibrium)",
          "note": "By LP duality, every LP optimum corresponds to competitive equilibrium prices"
        },
        {
          "field_a_term": "FCC spectrum repacking constraint satisfaction",
          "field_b_term": "NP-hard combinatorial auction design",
          "note": "Feasibility check (can n stations be repacked into fewer channels?) is NP-complete"
        },
        {
          "field_a_term": "strategy-proof mechanism",
          "field_b_term": "dominant strategy incentive compatible (DSIC) mechanism",
          "note": "DA is DSIC for proposers (Roth 1982); not for receivers — hospitals can game"
        }
      ],
      "references": [
        {
          "note": "Dantzig (1963) Linear Programming and Extensions. Princeton University Press."
        },
        {
          "doi": "10.2307/2312726",
          "note": "Gale & Shapley (1962) Am Math Mon 69:9 — college admissions and stable marriage"
        },
        {
          "doi": "10.1257/aer.89.4.748",
          "note": "Roth & Peranson (1999) Am Econ Rev 89:748 — NRMP redesign using DA"
        },
        {
          "doi": "10.1126/science.1154938",
          "note": "Roth (2008) Science 319:1069 — kidney exchange as market design"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/engineering-social-science/b-operations-research-market-design.yaml"
    },
    {
      "id": "b-smart-cities-urban-data-analytics",
      "title": "Smart city platforms bridge engineering control theory and social science: IoT sensor networks feed model predictive control for traffic and energy optimization, while differential privacy mechanisms address the fundamental tension between urban data utility and individual rights.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Smart city platforms aggregate IoT sensor data (traffic flow, air quality, energy consumption, pedestrian density) for real-time urban management. The data pipeline runs from edge computing (latency <10 ms for local decisions) through fog-layer aggregation to cloud ML inference for city-scale optimi",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-differential-privacy-urban-analytics-accuracy-threshold"
      ],
      "communication_gap": "Smart city engineering is dominated by computer science and control engineering conferences (IEEE, ACM) while the equity, privacy, and governance implications are studied in urban planning, sociology, and law journals. Privacy engineers and civil society organizations occupy different professional worlds. The differential privacy literature (theory CS) is rarely read by urban planners, and vice versa.\n",
      "translation_table": [
        {
          "field_a_term": "model predictive control (MPC) receding horizon",
          "field_b_term": "sequential social decision-making under uncertainty",
          "note": "MPC formalizes the engineering intuition of rolling planning; social planners face the same structure"
        },
        {
          "field_a_term": "IoT sensor network data stream",
          "field_b_term": "behavioral trace data (mobility, consumption, social interaction)",
          "note": "The same data stream is simultaneously an engineering input and a social surveillance concern"
        },
        {
          "field_a_term": "differential privacy parameter epsilon (ε)",
          "field_b_term": "privacy-utility tradeoff — social norm boundary",
          "note": "ε is an engineering parameter but its acceptable value is a social/political choice"
        },
        {
          "field_a_term": "peak demand reduction 15% (HVAC optimization)",
          "field_b_term": "demand elasticity — behavioral response to dynamic pricing",
          "note": "Engineering optimization requires a social-science model of occupant behavior"
        },
        {
          "field_a_term": "system-optimal traffic assignment (Wardrop second principle)",
          "field_b_term": "social welfare maximization vs. Nash equilibrium (user equilibrium)",
          "note": "System-optimal routing requires coordination that individual drivers do not spontaneously produce"
        }
      ],
      "references": [
        {
          "note": "Batty (2013) The New Science of Cities. MIT Press"
        },
        {
          "note": "Zheng et al. (2014) ACM Trans Intell Syst Technol 5:29 — urban computing survey"
        },
        {
          "note": "Dwork (2006) ICALP LNCS 4052:1 — differential privacy definition"
        },
        {
          "note": "Albino et al. (2015) Cities 45:1 — smart city concepts review"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/engineering-social-science/b-smart-cities-urban-data-analytics.yaml"
    },
    {
      "id": "b-next-generation-matrix-control-epidemic-interventions",
      "title": "Next-generation-matrix epidemiology provides a control-oriented state-space abstraction for adaptive intervention policies targeting dominant transmission modes.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The next-generation matrix (NGM) decomposes compartmental transmission into mode-specific reproduction gains. This maps naturally to control concepts: interventions act as structured gain reductions that should drive the spectral radius below unity while minimizing policy cost and oscillatory oversh",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-mpc-with-ngm-constraints-reduces-epidemic-overshoot"
      ],
      "communication_gap": "Epidemiology often reports R_t as a scalar summary, while controller design requires explicit mode structure and actuation limits across subpopulations.\n",
      "translation_table": [
        {
          "field_a_term": "spectral radius rho(K) of NGM",
          "field_b_term": "closed-loop instability indicator",
          "note": "rho(K) > 1 corresponds to growth mode amplification."
        },
        {
          "field_a_term": "targeted NPIs/vaccination by subgroup",
          "field_b_term": "structured feedback gain scheduling",
          "note": "Interventions are matrix-entry perturbations, not scalar knobs."
        },
        {
          "field_a_term": "variant emergence and contact shifts",
          "field_b_term": "time-varying plant uncertainty",
          "note": "Robust adaptive control formulations capture changing transmission topology."
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF00178324",
          "note": "Diekmann, Heesterbeek, and Metz (1990), formal definition and computation of R0 via next-generation operators."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/epidemiology-control-engineering/b-next-generation-matrix-control-epidemic-interventions.yaml"
    },
    {
      "id": "b-federated-averaging-x-multisite-epidemic-forecasting",
      "title": "Federated averaging bridges distributed optimization and multi-site epidemic forecasting when patient-level data sharing is constrained.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy (to be empirically validated): FedAvg-style decentralized optimization can combine geographically distributed surveillance models while preserving local governance constraints and reducing centralization bottlenecks.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-federated-ensembles-improve-cross-site-epidemic-generalization"
      ],
      "communication_gap": "Public-health teams focus on policy-timely reliability, while federated ML studies often optimize communication efficiency on benchmark datasets.",
      "translation_table": [
        {
          "field_a_term": "client update",
          "field_b_term": "site-specific epidemiologic model gradient",
          "note": "Each health system contributes local parameter shifts."
        },
        {
          "field_a_term": "server aggregation",
          "field_b_term": "regional consensus forecast",
          "note": "Weighted aggregation approximates pooled learning without raw-data transfer."
        },
        {
          "field_a_term": "non-IID drift",
          "field_b_term": "cross-site outbreak heterogeneity",
          "note": "Performance depends on heterogeneity-aware aggregation."
        }
      ],
      "references": [
        {
          "arxiv": "1602.05629",
          "note": "Communication-Efficient Learning of Deep Networks from Decentralized Data (FedAvg)."
        },
        {
          "url": "https://www.who.int/emergencies/surveillance",
          "note": "WHO disease surveillance context."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/epidemiology-machine-learning/b-federated-averaging-x-multisite-epidemic-forecasting.yaml"
    },
    {
      "id": "b-epidemic-ensemble-kalman-filter",
      "title": "Epidemic state estimation is a nonlinear filtering problem: the ensemble Kalman filter (EnKF) recursively updates SIR compartment parameters from case report observations, combining data assimilation with mechanistic disease models",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The SIR epidemic model with time-varying transmission rate β(t) defines a dynamical system: dS/dt=-βSI/N, dI/dt=βSI/N-γI, dR/dt=γI. Case reports y_t (new cases per day) are noisy observations of the state. The ensemble Kalman filter (EnKF) treats the epidemic as a data assimilation problem: maintain",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-epidemic-ensemble-kalman-filter"
      ],
      "communication_gap": "Epidemiologists use Rt estimators (EpiEstim, EpiNow2) based on the renewal equation without connection to the data assimilation literature from numerical weather prediction. Data assimilationists in atmospheric science use EnKF routinely but rarely apply it to epidemiological compartment models. The mathematical identity between weather forecasting and epidemic forecasting as filtering problems was only highlighted systematically during COVID-19.\n",
      "translation_table": [
        {
          "field_a_term": "SIR compartment state (S, I, R, β) at time t",
          "field_b_term": "hidden state vector x in the state-space model",
          "note": "Augmented state includes β as a parameter to be estimated online"
        },
        {
          "field_a_term": "daily reported case counts y_t",
          "field_b_term": "observation vector H·x + ε in the measurement model",
          "note": "H maps state to expected cases (Poisson distributed); ε is observation noise"
        },
        {
          "field_a_term": "ensemble spread (width of posterior distribution over states)",
          "field_b_term": "forecast uncertainty σ_f in Kalman filter formalism",
          "note": "Wide ensemble = high uncertainty; narrow ensemble = confident state estimate"
        },
        {
          "field_a_term": "R_eff(t) time series with credible intervals",
          "field_b_term": "online parameter estimation from sequential data assimilation",
          "note": "EnKF gives full posterior over R_eff each day, unlike window-based Rt estimators"
        }
      ],
      "references": [
        {
          "doi": "10.1175/1520-0493(2001)129<0420:AESLPF>2.0.CO;2",
          "note": "Evensen (2003) The ensemble Kalman filter — theoretical formulation and practical implementation. Ocean Dyn 53:343"
        },
        {
          "doi": "10.1098/rsos.171833",
          "note": "Birrell et al. (2018) Evidence synthesis for stochastic epidemic models. Statist Sci 33:34"
        },
        {
          "doi": "10.1073/pnas.2006520117",
          "note": "Shaman & Karspeck (2012) Forecasting seasonal outbreaks of influenza. PNAS 109:20425"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/epidemiology-mathematics/b-epidemic-ensemble-kalman-filter.yaml"
    },
    {
      "id": "b-floquet-stability-x-seasonal-epidemic-forcing-windows",
      "title": "Floquet stability analysis links periodic forcing theory to seasonal epidemic intervention windows.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Seasonal transmission models can be interpreted as periodically forced oscillators where Floquet multipliers identify when small policy perturbations most effectively suppress outbreaks.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-floquet-instability-metrics-improve-seasonal-epi-intervention-timing"
      ],
      "communication_gap": "Communities use different terminology and validation conventions, masking transferable method equivalence.",
      "translation_table": [],
      "references": [
        {
          "url": "https://royalsocietypublishing.org/doi/10.1098/rspb.2009.2207",
          "note": "Seasonal forcing in infectious disease dynamics."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/epidemiology-mathematics/b-floquet-stability-x-seasonal-epidemic-forcing-windows.yaml"
    },
    {
      "id": "b-mori-zwanzig-memory-kernels-x-epidemic-model-reduction",
      "title": "Mori-Zwanzig memory-kernel reduction offers a principled bridge between high-dimensional contact dynamics and compact epidemic models.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Projecting unresolved contact-network dynamics into memory terms can improve reduced epidemic models beyond Markov SEIR approximations. This bridge is explicitly speculative until validated on prospective outbreak datasets.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-memory-augmented-seir-improves-forecast-turning-points"
      ],
      "communication_gap": "Adjacent communities use different software stacks and validation norms, so mathematically equivalent tools are often rediscovered in parallel.",
      "translation_table": [
        {
          "field_a_term": "Resolved variables projection",
          "field_b_term": "Compartment-level epidemic states",
          "note": "Retains observable dimensions while compressing latent structure."
        },
        {
          "field_a_term": "Memory kernel",
          "field_b_term": "Delayed effective transmission feedback",
          "note": "Captures non-Markov effects from unresolved contacts."
        },
        {
          "field_a_term": "Noise term",
          "field_b_term": "Unmodeled behavioral fluctuation",
          "note": "Represents stochastic forcing from hidden processes."
        }
      ],
      "references": [
        {
          "doi": "10.1098/rsta.1922.0009",
          "note": "Fisher (1922) estimation and information."
        },
        {
          "doi": "10.1017/S0962492910000061",
          "note": "Stuart (2010) Bayesian inverse-problem foundations."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/epidemiology-mathematics/b-mori-zwanzig-memory-kernels-x-epidemic-model-reduction.yaml"
    },
    {
      "id": "b-pandemic-optimal-stopping",
      "title": "Optimal epidemic intervention timing is an optimal stopping problem where the decision to implement NPIs minimizes total social cost, with the threshold case count derived from the ratio of NPI costs to transmission reduction benefit",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The decision to implement non-pharmaceutical interventions (NPIs) during a growing epidemic is an optimal stopping problem with value function V(I, t) = min_{tau} E[C(I, t, tau)], where the optimal stopping threshold I* = alpha / (beta * delta_R) balances NPI cost alpha against transmission reductio",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Public health officials make intervention decisions based on epidemiological thresholds and political considerations while decision theorists study optimal stopping; the formal connection between epidemic decision-making and optimal stopping theory is underexplored in both public health training and decision theory curricula.",
      "translation_table": [
        {
          "field_a_term": "epidemic incidence I(t) on day t of outbreak",
          "field_b_term": "state variable in optimal stopping problem",
          "note": "Incidence is the observable; the stopping rule specifies at which I(t) to intervene"
        },
        {
          "field_a_term": "NPIs (lockdowns, school closures, mask mandates)",
          "field_b_term": "control action in stochastic optimal control formulation",
          "note": "NPI implementation corresponds to stopping the uncontrolled epidemic process and switching to controlled dynamics"
        },
        {
          "field_a_term": "reproduction number R0 -> Rc under NPI",
          "field_b_term": "drift coefficient change in SIR stochastic process",
          "note": "NPIs reduce beta, changing Brownian drift from positive (growing epidemic) to negative (declining)"
        },
        {
          "field_a_term": "herd immunity threshold HIT = 1 - 1/R0",
          "field_b_term": "terminal boundary condition in finite-horizon optimal stopping",
          "note": "Interventions before HIT is reached can avoid the full epidemic; stopping late (I > HIT S0) is suboptimal"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rsif.2020.0435",
          "note": "Alvarez et al. (2020) J R Soc Interface - optimal lockdown in the SIR model: cost-benefit analysis"
        },
        {
          "doi": "10.1073/pnas.2014347118",
          "note": "Berger et al. (2020) PNAS - optimal testing and containment decisions under uncertainty"
        },
        {
          "doi": "10.1017/S0266466606060294",
          "note": "Chick (2005) - Bayesian sequential clinical trial design as optimal stopping"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/epidemiology-mathematics/b-pandemic-optimal-stopping.yaml"
    },
    {
      "id": "b-sir-network-percolation-threshold",
      "title": "The epidemic threshold R₀ = 1 in the SIR model is mathematically identical to the bond-percolation threshold on the contact network: an epidemic spreads to a macroscopic fraction of the population if and only if the transmission bond-occupation probability exceeds the percolation critical point p_c, and the final epidemic size equals the size of the giant percolation cluster.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "In an SIR epidemic on a contact network, each edge (i,j) is independently occupied with probability T = 1 − exp(−βτ) (transmission probability × infectious period). The expected outbreak size from a single seed equals the expected cluster size in bond percolation at occupation probability T. The epi",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-targeted-vaccination-percolation-optimality"
      ],
      "communication_gap": "Epidemiologists traditionally frame R₀ in terms of individual transmission rates, while statistical physicists frame percolation as a phase transition on graphs. Pastor-Satorras & Vespignani (2001) stated the equivalence for scale-free networks; Newman (2002) proved it for general degree distributions. The equivalence is known to network scientists but rarely appears in CDC/WHO epidemiological guidance.\n",
      "translation_table": [
        {
          "field_a_term": "Bond occupation probability T (SIR transmissibility)",
          "field_b_term": "Percolation bond probability p",
          "note": "T = 1−exp(−βτ); p=T at the mapping; p > p_c ↔ R₀ > 1"
        },
        {
          "field_a_term": "Giant connected component (percolation)",
          "field_b_term": "Final epidemic size (fraction ever infected)",
          "note": "Both equal the probability that an initial node is in the giant cluster"
        },
        {
          "field_a_term": "Percolation threshold p_c = ⟨k⟩/⟨k²⟩",
          "field_b_term": "Epidemic threshold R₀ = 1 (T_c = p_c on the network)",
          "note": "Identical quantities; p_c → 0 on scale-free networks ↔ vanishing epidemic threshold"
        },
        {
          "field_a_term": "Targeted immunisation = node removal (percolation)",
          "field_b_term": "Vaccination strategy in an epidemic network",
          "note": "Removing high-degree nodes fragments the giant component faster than random removal"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.86.3200",
          "note": "Pastor-Satorras & Vespignani (2001) PRL – epidemic spreading on scale-free networks; vanishing threshold"
        },
        {
          "doi": "10.1103/PhysRevE.66.016128",
          "note": "Newman (2002) PRE – formal proof of SIR–percolation equivalence on arbitrary degree distributions"
        },
        {
          "doi": "10.1103/PhysRevLett.85.4626",
          "note": "Moore & Newman (2000) PRL – epidemics and percolation in small-world networks"
        },
        {
          "doi": "10.1126/science.1248506",
          "note": "Brockmann & Helbing (2013) Science – global epidemic spreading via effective-distance network geometry"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/epidemiology-mathematics/b-sir-network-percolation-threshold.yaml"
    },
    {
      "id": "b-vaccination-threshold-herd-immunity-sir",
      "title": "The vaccination threshold for herd immunity is derived analytically from the SIR mathematical model: the critical vaccination fraction p_c = 1 - 1/R₀ ensures the effective reproduction number R_eff < 1, so that epidemic invasion fails when a sufficient fraction of the population is immune.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The SIR model gives dI/dt = βSI - γI = γI(R₀·S/N - 1), so the epidemic grows (dI/dt > 0) only when S/N > 1/R₀. If a fraction p of the population is vaccinated (assumed perfectly, pre-epidemic), then initial susceptible fraction s₀ = 1 - p. The epidemic fails to invade when s₀ < 1/R₀, yielding the he",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-superspreader-driven-epidemics-lower-herd-immunity-threshold"
      ],
      "communication_gap": "The SIR-derived herd immunity threshold has been public health policy guidance since Kermack-McKendrick (1927), but translating the mathematical assumptions (homogeneous mixing, perfect immunity, fixed R₀) to real policy contexts remains challenging; network epidemiology and vaccine waning models complicate the clean 1-1/R₀ formula, and the communication gap between modelers and policymakers is significant.\n",
      "translation_table": [
        {
          "field_a_term": "herd immunity threshold (epidemiology)",
          "field_b_term": "epidemic extinction condition s₀ < 1/R₀ (mathematical biology)",
          "note": "Same concept; the threshold is derived analytically from invasion criterion"
        },
        {
          "field_a_term": "basic reproduction number R₀ (epidemiology)",
          "field_b_term": "spectral radius of next-generation matrix (mathematical biology)",
          "note": "R₀ is the dominant eigenvalue of the NGM; herd immunity threshold follows directly"
        },
        {
          "field_a_term": "vaccine coverage fraction p (epidemiology)",
          "field_b_term": "initial removed fraction in SIR model (mathematical biology)",
          "note": "Vaccinated individuals start in the R compartment, reducing effective S₀"
        },
        {
          "field_a_term": "outbreak extinction (epidemiology)",
          "field_b_term": "dI/dt < 0 at t=0 (mathematical biology)",
          "note": "Epidemic fails to grow if initial effective reproduction number R_eff = R₀·s₀ < 1"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rspa.1927.0118",
          "note": "Kermack & McKendrick (1927) - contribution to mathematical theory of epidemics (original SIR)"
        },
        {
          "doi": "10.1093/aje/kwm088",
          "note": "Fine et al. (2011) - herd immunity — a rough guide"
        },
        {
          "doi": "10.1098/rsif.2010.0686",
          "note": "Diekmann et al. (2010) - construction of next-generation matrices for compartmental epidemic models"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/epidemiology-mathematics/b-vaccination-threshold-herd-immunity-sir.yaml"
    },
    {
      "id": "b-openalex-network-epidemic-percolation",
      "title": "Epidemic spread on contact networks is mathematically equivalent to bond percolation, where infection probability plays the role of bond occupation probability and the epidemic threshold corresponds to the percolation transition — enabling network topology to predict outbreak potential before any pathogen-specific parameters are measured.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Huang et al. (2020, 51 k citations) documented the clinical features of SARS-CoV-2, revealing explosive network-mediated spread through close-contact clusters. Network science and statistical physics provide the theoretical foundation for this behavior: the SIR epidemic process on a contact network ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-sars-cov2-network-percolation"
      ],
      "communication_gap": "Epidemiologists are trained in compartmental models (SIR/SEIR) without network topology; network scientists and physicists rarely engage with empirical contact data from outbreak investigations. The percolation-epidemic equivalence was established theoretically but has not fully penetrated public health practice. During COVID-19, most policy models used homogeneous mixing assumptions, missing the network structure that drives superspreading events.\n",
      "translation_table": [
        {
          "field_a_term": "bond occupation probability p",
          "field_b_term": "transmission probability T = 1 - exp(-beta*tau)",
          "note": "The probability that an infected contact transmits to a susceptible neighbour"
        },
        {
          "field_a_term": "percolation threshold p_c",
          "field_b_term": "epidemic threshold R_0 = 1",
          "note": "The critical point below which only finite clusters (outbreaks) occur"
        },
        {
          "field_a_term": "giant connected component",
          "field_b_term": "epidemic (infecting O(N) individuals)",
          "note": "Above threshold, a macroscopic fraction of the population is reached"
        },
        {
          "field_a_term": "degree distribution P(k)",
          "field_b_term": "contact degree distribution (number of contacts per person)",
          "note": "Power-law tails dramatically lower the percolation threshold"
        },
        {
          "field_a_term": "targeted site removal (immunisation of hubs)",
          "field_b_term": "ring vaccination / targeted vaccination of superspreaders",
          "note": "Removing high-degree nodes most efficiently fragments the giant component"
        }
      ],
      "references": [
        {
          "doi": "10.1016/s0140-6736(20)30183-5",
          "note": "Huang et al. (2020) Clinical features of COVID-19 — 51,666 citations; primary reference"
        },
        {
          "doi": "10.1103/PhysRevLett.86.3200",
          "note": "Newman et al. (2001) — random graphs with arbitrary degree distributions and percolation"
        },
        {
          "doi": "10.1103/PhysRevE.66.016128",
          "note": "Newman (2002) — spread of epidemic disease on networks"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/epidemiology-network-science/b-openalex-network-epidemic-percolation.yaml"
    },
    {
      "id": "b-percolation-thresholds-x-antimicrobial-combination-therapy-networks",
      "title": "Percolation thresholds can connect habitat-fragmentation mathematics to antimicrobial combination network design.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Percolation thresholds can connect habitat-fragmentation mathematics to antimicrobial combination network design.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-percolation-aware-combination-selection-delays-resistance-network-percolation"
      ],
      "communication_gap": "Domain-specific vocabularies and benchmark conventions obscure transferable mathematical structure.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1103/PhysRevE.58.R5257",
          "note": "Percolation threshold formalism for network connectivity transitions."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/epidemiology-network-science/b-percolation-thresholds-x-antimicrobial-combination-therapy-networks.yaml"
    },
    {
      "id": "b-sir-percolation",
      "title": "The SIR epidemic model is bond percolation on a contact network — the epidemic threshold 1/R₀ equals the percolation threshold p_c, and herd immunity is the destruction of the giant connected component of susceptible individuals.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The classic SIR (Susceptible-Infected-Recovered) compartmental epidemic model maps exactly onto bond percolation on the underlying contact network. Each person is a node; each potentially infectious contact is a bond that is \"occupied\" with probability T = 1 - exp(-β/γ), the transmissibility. An epi",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-scale-free-epidemic-threshold-vaccination"
      ],
      "communication_gap": "Compartmental SIR models (Kermack-McKendrick) predate network science by six decades. Epidemiologists trained on ODEs often view network models as complex extensions requiring simulation, not as exact instances of a solved problem in statistical physics. The percolation connection was formalized by Newman (2002) and Pastor-Satorras & Vespignani (2001) but remains underutilized in public health modeling. The fields publish in non-overlapping journals (PLOS Pathogens, Epidemiology vs. Physical Review E, Journal of Statistical Physics).\n",
      "translation_table": [
        {
          "field_a_term": "susceptible individual",
          "field_b_term": "unoccupied lattice site in the percolation problem",
          "note": "Each susceptible person is a node that can be reached by the epidemic"
        },
        {
          "field_a_term": "infectious contact (transmission event)",
          "field_b_term": "occupied bond in bond percolation",
          "note": "A bond is occupied with probability T = transmissibility"
        },
        {
          "field_a_term": "epidemic threshold (R₀ = 1)",
          "field_b_term": "percolation threshold (bond occupancy p = p_c)",
          "note": "Both are the critical conditions for a spanning cluster / macroscopic outbreak"
        },
        {
          "field_a_term": "final epidemic size",
          "field_b_term": "size of the giant connected component S",
          "note": "In the infinite-size limit both are exact via the same self-consistency equation"
        },
        {
          "field_a_term": "basic reproduction number R₀",
          "field_b_term": "network epidemic spreading parameter κ = ⟨k²⟩/⟨k⟩",
          "note": "R₀ = T·κ for a network SIR model"
        },
        {
          "field_a_term": "herd immunity threshold",
          "field_b_term": "node fraction removed to destroy the giant component",
          "note": "Must account for network heterogeneity — not simply 1 - 1/R₀"
        },
        {
          "field_a_term": "random vaccination",
          "field_b_term": "random node removal (site percolation)",
          "note": "Effective but inefficient on scale-free networks"
        },
        {
          "field_a_term": "targeted hub vaccination",
          "field_b_term": "targeted high-degree node removal (acquaintance immunisation)",
          "note": "Disrupts giant component with far fewer removed nodes"
        },
        {
          "field_a_term": "scale-free contact network (γ ≤ 3)",
          "field_b_term": "network with diverging ⟨k²⟩ → zero percolation threshold",
          "note": "Explains persistence of STIs and why scale-free networks are epidemic-prone"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevE.66.016128",
          "note": "Newman (2002) — exact mapping of SIR to bond percolation with generating functions"
        },
        {
          "doi": "10.1103/PhysRevLett.86.3200",
          "note": "Pastor-Satorras & Vespignani (2001) — epidemic spreading on scale-free networks, p_c → 0"
        },
        {
          "note": "Anderson & May (1991) Infectious Diseases of Humans. Oxford University Press."
        },
        {
          "doi": "10.1098/rsif.2005.0051",
          "note": "Keeling & Eames (2005) Networks and epidemic models — J R Soc Interface 2:295"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/epidemiology-network-science/b-sir-percolation.yaml"
    },
    {
      "id": "b-cultural-transmission-sir-models",
      "title": "Cultural beliefs, practices, and memes spread through populations via social contact in a manner mathematically equivalent to the SIR epidemiological model: a basic reproduction number R_0 = beta*N/gamma governs whether a cultural innovation reaches epidemic prevalence or dies out, and herd-immunity thresholds predict when a competing norm can displace an incumbent",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Cultural transmission models (Cavalli-Sforza & Feldman oblique transmission, Henrich's prestige-biased learning) can be mapped onto SIR compartmental dynamics: susceptibles S are individuals who have not yet adopted a practice, infectives I are active adopters who transmit by social influence at rat",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-cultural-sir-meme-herd-immunity"
      ],
      "communication_gap": "Social scientists studying cultural change use evolutionary and decision-theoretic frameworks while epidemiologists apply deterministic and stochastic compartmental models; the SIR analogy is frequently invoked in media studies but rarely made mathematically rigorous with calibrated transmission rate estimates.",
      "translation_table": [
        {
          "field_a_term": "susceptible non-adopter S in cultural dynamics (social science)",
          "field_b_term": "susceptible S compartment in SIR model (epidemiology)",
          "note": "Individuals not yet exposed to or persuaded by a cultural innovation"
        },
        {
          "field_a_term": "active cultural adopter who proselytizes (social science)",
          "field_b_term": "infectious I compartment (epidemiology)",
          "note": "Active adopters transmit the practice through social contact at rate proportional to beta*S*I"
        },
        {
          "field_a_term": "abandonment or disinterest rate in cultural practice (social science)",
          "field_b_term": "recovery rate gamma in SIR (epidemiology)",
          "note": "Individuals cease active transmission; in cultural models this can be reversible unlike biological recovery"
        },
        {
          "field_a_term": "social network structure (degree distribution, clustering) (social science)",
          "field_b_term": "contact network heterogeneity in network epidemiology (epidemiology)",
          "note": "Both fields find that hubs and homophily alter the effective R_0 relative to the homogeneous-mixing assumption"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1182238",
          "note": "Goffman & Newill (1964) - generalisation of epidemic theory: application to cultural transmission"
        },
        {
          "doi": "10.1007/978-3-642-14707-1",
          "note": "Acemoglu & Robinson (2012) - cultural transmission as epidemiological diffusion (review context)"
        },
        {
          "doi": "10.1073/pnas.1202038109",
          "note": "Cointet & Roth (2009) - how realistic should knowledge diffusion models be? SIR calibration"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/epidemiology-social-science/b-cultural-transmission-sir-models.yaml"
    },
    {
      "id": "b-causal-inference-negative-controls-x-observational-pharmacovigilance",
      "title": "Negative-control causal inference bridges epidemiologic bias diagnostics and observational pharmacovigilance signal triage.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Negative-control exposure and outcome designs can be operationalized as bias sentinels in pharmacovigilance pipelines before elevating safety signals.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-negative-control-calibrated-estimators-reduce-pharmacovigilance-signal-bias"
      ],
      "communication_gap": "Communities use different terminology and validation conventions, masking transferable method equivalence.",
      "translation_table": [],
      "references": [
        {
          "url": "https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3053408/",
          "note": "Negative controls in epidemiology."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/epidemiology-statistics/b-causal-inference-negative-controls-x-observational-pharmacovigilance.yaml"
    },
    {
      "id": "b-extreme-value-theory-x-antimicrobial-resistance-surveillance",
      "title": "Extreme-value theory offers a common tail-risk language for antimicrobial-resistance emergence surveillance.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Extreme-value theory offers a common tail-risk language for antimicrobial-resistance emergence surveillance.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-peaks-over-threshold-models-improve-amr-outbreak-early-warning"
      ],
      "communication_gap": "The two communities use different notation, benchmarks, and publication venues, which obscures transferable mathematical structure.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1214/aoms/1177731924",
          "note": "Classical extreme-value limit theory."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/epidemiology-statistics/b-extreme-value-theory-x-antimicrobial-resistance-surveillance.yaml"
    },
    {
      "id": "b-sequential-probability-ratio-test-x-pathogen-genomic-surveillance",
      "title": "Sequential probability ratio testing maps naturally to real-time pathogen genomic surveillance trigger design.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Sequential probability ratio testing maps naturally to real-time pathogen genomic surveillance trigger design.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-adaptive-sprt-alerting-detects-concerning-pathogen-variants-earlier-than-fixed-window-rules"
      ],
      "communication_gap": "The two communities use different notation, benchmarks, and publication venues, which obscures transferable mathematical structure.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1214/aoms/1177731118",
          "note": "Wald SPRT decision framework."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/epidemiology-statistics/b-sequential-probability-ratio-test-x-pathogen-genomic-surveillance.yaml"
    },
    {
      "id": "b-epigenetic-clocks-aging-biomarkers",
      "title": "DNA methylation epigenetic clocks are quantitative aging biomarkers that predict chronological and biological age with sub-decade accuracy, bridging epigenetics and geroscience by operationalizing the rate of biological aging.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Epigenetic clocks (Horvath, Hannum, PhenoAge, GrimAge) use penalized regression on CpG methylation levels across hundreds of genomic loci to construct a quantitative biomarker of biological age that predicts chronological age (r > 0.96), all-cause mortality, age-related disease incidence, and respon",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-epigenetic-reprogramming-lifespan-extension"
      ],
      "communication_gap": "Epigeneticists study methylation mechanistically while geriatricians and geroscientists focus on functional aging outcomes; epigenetic clocks bridge these but the causal mechanism linking methylation patterns to physiological aging remains debated, limiting their use as intervention targets rather than just biomarkers.\n",
      "translation_table": [
        {
          "field_a_term": "CpG methylation beta values (epigenetics)",
          "field_b_term": "biological age biomarker features (geroscience)",
          "note": "Specific CpG sites change monotonically with age and are combined in a linear predictor"
        },
        {
          "field_a_term": "epigenetic age acceleration (epigenetics)",
          "field_b_term": "biological age - chronological age difference (geroscience)",
          "note": "Positive acceleration predicts increased mortality risk; linked to lifestyle and disease"
        },
        {
          "field_a_term": "methylation drift at enhancers (epigenetics)",
          "field_b_term": "erosion of youthful gene expression programs (geroscience)",
          "note": "Loss of methylation at repressed repeats and gain at developmental enhancers with age"
        },
        {
          "field_a_term": "partial reprogramming (Yamanaka factors) (epigenetics)",
          "field_b_term": "epigenetic age reversal / rejuvenation (geroscience)",
          "note": "OSKM expression resets epigenetic clocks; provides causal link between epigenome and aging"
        }
      ],
      "references": [
        {
          "doi": "10.18632/aging.100433",
          "note": "Horvath (2013) - DNA methylation age of human tissues and cell types (pan-tissue clock)"
        },
        {
          "doi": "10.1038/s43587-021-00134-3",
          "note": "Lu et al. (2023) - GrimAge2 clock predicting mortality and morbidity"
        },
        {
          "doi": "10.1038/s41586-023-05892-5",
          "note": "Partial reprogramming in vivo reverses epigenetic age in mice (Sinclair lab 2023)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/epigenetics-geroscience/b-epigenetic-clocks-aging-biomarkers.yaml"
    },
    {
      "id": "b-predator-detection-signal-detection-theory",
      "title": "An animal deciding whether a stimulus indicates a predator is solving a binary hypothesis test: signal detection theory maps the vigilance threshold exactly onto the decision boundary of a likelihood-ratio test, and ROC curve analysis quantifies the evolutionary trade-off between false alarms (wasted foraging time) and misses (predation risk).\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Signal detection theory (SDT) models a sensory decision as choosing between two overlapping distributions: signal + noise (predator present) vs. noise alone (predator absent). The decision criterion beta = P(stimulus|absent) / P(stimulus|present) is set by the cost-benefit ratio (C_false_alarm / C_m",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Behavioural ecologists know vigilance theory but rarely formalise it as a statistical detection problem; statisticians do not study foraging animals as model systems for optimal decision theory. The mapping requires bridging psychophysics, Bayesian decision theory, and life-history theory.\n",
      "translation_table": [
        {
          "field_a_term": "sensitivity d' (signal detection theory)",
          "field_b_term": "sensory acuity of the prey animal (evolutionary biology)",
          "note": "d' = (mu_signal - mu_noise) / sigma; larger d' reduces predation risk per unit vigilance"
        },
        {
          "field_a_term": "decision criterion beta (statistics)",
          "field_b_term": "flight-initiation distance / vigilance threshold (evolutionary biology)",
          "note": "Optimal beta = C_false_alarm / C_miss proportional to foraging payoff / predation mortality"
        },
        {
          "field_a_term": "false alarm rate (statistics)",
          "field_b_term": "unnecessary vigilance / interrupted foraging bouts (evolutionary biology)",
          "note": "Each false alarm costs foraging time; high predation pressure reduces tolerable false-alarm rate"
        },
        {
          "field_a_term": "AUROC (statistics)",
          "field_b_term": "overall predator-detection ability (evolutionary biology)",
          "note": "AUROC = P(correct rejection) independent of criterion; measures sensory system quality"
        }
      ],
      "references": [
        {
          "doi": "10.1037/h0054346",
          "note": "Green & Swets (1966) - Signal Detection Theory and Psychophysics"
        },
        {
          "doi": "10.1016/S0003-3472(80)80028-3",
          "note": "Lima & Dill (1990) - behavioral decisions made under the risk of predation"
        },
        {
          "doi": "10.1098/rspb.2005.3165",
          "note": "Fernandez-Juricic et al. (2006) - SDT applied to avian vigilance and predator detection"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/evolutionary-biology-statistics/b-predator-detection-signal-detection-theory.yaml"
    },
    {
      "id": "b-market-liquidity-hawkes-processes",
      "title": "High-frequency order-book dynamics and market liquidity exhibit self-exciting behaviour best described by the Hawkes process: each trade event increases the instantaneous probability of subsequent trades via a power-law kernel, making the arrival of market orders a mutually exciting point process whose branching ratio eta = integral of kernel determines whether liquidity cascades (flash crash) or mean-reverts",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The arrival of limit and market orders on an electronic exchange follows a multivariate Hawkes process N_i(t) with intensity lambda_i(t) = mu_i + sum_j integral_{-inf}^t phi_{ij}(t-s) dN_j(s), where phi_{ij} are power-law excitation kernels; the critical branching ratio eta measures how close the ma",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-hawkes-process-liquidity-flash-crash"
      ],
      "communication_gap": "Quantitative finance professionals calibrate Hawkes processes to match observed order-book statistics while probabilists study Hawkes processes as abstract random processes; systematic methods for non-stationary and non-parametric Hawkes inference have not been fully adopted in operational trading risk systems.",
      "translation_table": [
        {
          "field_a_term": "market order arrival rate lambda(t) (finance)",
          "field_b_term": "conditional intensity of a Hawkes point process (mathematics)",
          "note": "Each order arrival increases the probability of further orders within milliseconds via the self-excitation kernel"
        },
        {
          "field_a_term": "order book depth (bid-ask spread and volume at each level) (finance)",
          "field_b_term": "background intensity mu and kernel amplitude in Hawkes process (mathematics)",
          "note": "Deeper order books correspond to lower excitation amplitude; thin books give near-critical Hawkes branching"
        },
        {
          "field_a_term": "liquidity crisis / flash crash (finance)",
          "field_b_term": "supercritical branching process explosion (eta > 1) (mathematics)",
          "note": "Flash crashes occur when the Hawkes branching ratio crosses 1, producing order-book avalanches"
        },
        {
          "field_a_term": "bid-ask spread mean-reversion (finance)",
          "field_b_term": "subcritical Hawkes process stationary state (eta < 1) (mathematics)",
          "note": "Subcritical branching guarantees eventual return to equilibrium; spread width is related to eta"
        }
      ],
      "references": [
        {
          "doi": "10.1088/1742-5468/2011/12/P12028",
          "note": "Bacry et al. (2012) - non-parametric kernel estimation for symmetric Hawkes processes applied to high-frequency financial data"
        },
        {
          "doi": "10.1007/s00780-015-0282-y",
          "note": "Hardiman et al. (2013) - critical reflexivity in financial markets: a Hawkes process analysis"
        },
        {
          "doi": "10.1142/S2010326314500094",
          "note": "Filimonov & Sornette (2012) - quantifying reflexivity in financial markets: toward a prediction of flash crashes"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/finance-mathematics/b-market-liquidity-hawkes-processes.yaml"
    },
    {
      "id": "b-kelvin-helmholtz-instability-stratified-shear-flow",
      "title": "The Kelvin-Helmholtz instability arises at the interface between stratified fluid layers with velocity shear, governed by the Richardson number criterion, and produces the characteristic billowing vortices seen in clouds, ocean thermocline mixing, and planetary atmospheres.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "At the interface between two fluids of densities ρ₁ < ρ₂ moving at velocities U₁ and U₂, the Richardson number Ri = N²/(∂U/∂z)² determines stability: Ri < 0.25 (Miles-Howard theorem) is necessary (though not sufficient) for instability. Linear stability analysis of the Taylor-Goldstein equation give",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-richardson-number-turbulence-onset-universal"
      ],
      "communication_gap": "Geophysicists parameterizing ocean mixing in climate models and fluid mechanicians studying KH instability use the same Richardson number criterion but different modeling approaches; ocean models use bulk mixing parameterizations (KPP, MY) while fluid mechanics provides direct numerical simulations that rarely inform the parameterization choices.\n",
      "translation_table": [
        {
          "field_a_term": "velocity shear across ocean thermocline (geophysics)",
          "field_b_term": "KH instability shear layer (fluid mechanics)",
          "note": "Thermocline velocity shear drives KH instability that mixes heat, salt, and nutrients"
        },
        {
          "field_a_term": "Richardson number Ri (geophysics)",
          "field_b_term": "stability parameter in Taylor-Goldstein equation (fluid mechanics)",
          "note": "Ri < 1/4 necessary for KH instability; buoyancy stabilizes, shear destabilizes"
        },
        {
          "field_a_term": "diapycnal mixing in ocean interior (geophysics)",
          "field_b_term": "turbulent diffusion from KH billow breakdown (fluid mechanics)",
          "note": "KH-generated turbulence is parameterized by mixing efficiency Γ≈0.2 in ocean models"
        },
        {
          "field_a_term": "clear-air turbulence (CAT) in atmosphere (geophysics)",
          "field_b_term": "KH billow breaking at jet stream interface (fluid mechanics)",
          "note": "CAT occurs where Ri < 0.25 near wind shear layers — a hazard to aviation"
        }
      ],
      "references": [
        {
          "doi": "10.1017/S0022112061000305",
          "note": "Miles (1961) — on the stability of heterogeneous shear flows (Miles-Howard theorem)"
        },
        {
          "doi": "10.1175/JPO-D-12-0105.1",
          "note": "Smyth & Moum (2012) — ocean mixing by Kelvin-Helmholtz instability"
        },
        {
          "doi": "10.1175/1520-0493(1994)122<0927:AIOCAM>2.0.CO;2",
          "note": "Large et al. (1994) — KPP mixing scheme using Richardson number criterion"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/fluid-mechanics-geophysics/b-kelvin-helmholtz-instability-stratified-shear-flow.yaml"
    },
    {
      "id": "b-capillary-length-x-droplet-contact-line-dynamics",
      "title": "Capillary length (sqrt(gamma/(rho g))) as intrinsic wetting scale ↔ contact-line friction, pinning, and droplet morphology on heterogeneous solids (fluid mechanics ↔ materials science)\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The capillary length ell_c sets the gravity–surface-tension crossover scale for static menisci and droplet shapes on substrates. Contact-line dynamics add hysteresis, microscopic roughness, and chemical heterogeneity that pin the triple line; mesoscopically these effects compete with ell_c to set dr",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-capillary-wetting-pinning-length-universality-class"
      ],
      "communication_gap": "CFD wetting implementations often smear the contact line numerically while materials papers emphasize microgeometry; shared benchmarks for moving-line experiments across fluids are incomplete.\n",
      "translation_table": [
        {
          "field_a_term": "capillary length ell_c = sqrt(gamma / (rho g))",
          "field_b_term": "characteristic droplet base radius before gravity visibly flattens large puddles",
          "note": "Competes with pinning length scales from defects."
        },
        {
          "field_a_term": "Young’s equation equilibrium contact angle",
          "field_b_term": "apparent advancing/receding angles with hysteresis",
          "note": "Materials heterogeneity splits single theta into a band."
        },
        {
          "field_a_term": "Cox–Voinov-type viscous bending near contact line",
          "field_b_term": "molecular-scale friction / MCL models on patterned surfaces",
          "note": "Multiscale coupling is active research on both sides."
        }
      ],
      "references": [
        {
          "doi": "10.1103/RevModPhys.57.827",
          "note": "de Gennes (1985) — wetting: statics and dynamics (Rev. Mod. Phys.)."
        },
        {
          "doi": "10.1103/RevModPhys.81.739",
          "note": "Quéré (2009) — wetting and roughness (Rev. Mod. Phys.)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/fluid-mechanics-materials-science/b-capillary-length-x-droplet-contact-line-dynamics.yaml"
    },
    {
      "id": "b-finite-time-lyapunov-exponents-x-intracardiac-flow-mixing",
      "title": "Finite-time Lyapunov exponents connect Lagrangian coherent-structure analysis to intracardiac flow-mixing risk assessment.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "LCS/FTLE methods developed for geophysical transport quantify transport barriers and mixing rates in cardiac chambers. This gives a mechanics-first route to stasis and thrombosis-risk indicators.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ftle-ridge-persistence-predicts-left-atrial-appendage-stasis"
      ],
      "communication_gap": "Adjacent communities use different software stacks and validation norms, so mathematically equivalent tools are often rediscovered in parallel.",
      "translation_table": [
        {
          "field_a_term": "FTLE ridge",
          "field_b_term": "Transport barrier in blood flow",
          "note": "Persistent ridges indicate poor mixing zones."
        },
        {
          "field_a_term": "Lagrangian coherent structure",
          "field_b_term": "Chamber flow partition",
          "note": "Segments recirculating versus exchange regions."
        },
        {
          "field_a_term": "Finite-time deformation",
          "field_b_term": "Residence-time proxy",
          "note": "Higher deformation usually indicates better washout."
        }
      ],
      "references": [
        {
          "doi": "10.1016/j.physd.2005.10.007",
          "note": "Shadden, Lekien & Marsden (2005) - definition and properties of Lagrangian coherent structures from finite-time Lyapunov exponents."
        },
        {
          "doi": "10.1146/annurev-fluid-010313-141322",
          "note": "Haller (2015) - Lagrangian coherent structures (Annual Review of Fluid Mechanics)."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/fluid-mechanics-medicine/b-finite-time-lyapunov-exponents-x-intracardiac-flow-mixing.yaml"
    },
    {
      "id": "b-atmospheric-blocking-rossby-waves",
      "title": "Atmospheric blocking - persistent high-pressure systems that redirect the jet stream for weeks - is a quasi-stationary Rossby wave resonance phenomenon: geophysical fluid mechanics explains blocking onset through wave-mean flow interaction, barotropic instability, and the Charney-DeVore multiple equilibria framework.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Rossby waves are large-scale meanders of the atmospheric jet stream driven by the latitudinal gradient of the Coriolis parameter (beta effect). When Rossby wave phase speed matches mean flow speed, waves can become quasi- stationary and amplify through wave-mean flow interaction. Charney & DeVore (1",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Meteorologists use blocking indices and empirical composites without always framing blocking within the multiple-equilibria and wave-resonance framework of geophysical fluid dynamics; fluid dynamicists rarely analyse reanalysis datasets directly. Climate change impacts on blocking frequency require both the physical theory and observational expertise simultaneously.\n",
      "translation_table": [
        {
          "field_a_term": "quasi-geostrophic potential vorticity equation (fluid mechanics)",
          "field_b_term": "jet stream dynamics / Rossby wave propagation (meteorology)",
          "note": "QG-PV conservation governs large-scale atmospheric flow; blocking is a QG-PV anomaly"
        },
        {
          "field_a_term": "multiple equilibria (fluid mechanics)",
          "field_b_term": "zonal vs. blocked jet stream states (meteorology)",
          "note": "Charney-DeVore model predicts two stable flow regimes; blocking is the non-zonal equilibrium"
        },
        {
          "field_a_term": "wave resonance condition (c_phase = U_mean) (fluid mechanics)",
          "field_b_term": "blocking onset when Rossby wave becomes quasi-stationary (meteorology)",
          "note": "Resonance amplifies wave amplitude; necessary condition for blocking development"
        },
        {
          "field_a_term": "beta-plane approximation (fluid mechanics)",
          "field_b_term": "planetary vorticity gradient that restores Rossby waves (meteorology)",
          "note": "beta = df/dy where f is Coriolis parameter; determines Rossby wave dispersion relation"
        }
      ],
      "references": [
        {
          "doi": "10.1175/1520-0469(1979)036<1205:MSFAST>2.0.CO;2",
          "note": "Charney & DeVore (1979) - multiple flow equilibria in the atmosphere and blocking"
        },
        {
          "doi": "10.1175/MWR-D-13-00143.1",
          "note": "Woollings et al. (2018) - blocking and its response to climate change"
        },
        {
          "doi": "10.1175/BAMS-D-17-0003.1",
          "note": "Nabizadeh et al. (2019) - size of the atmospheric blocking events is increasing under climate change"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/fluid-mechanics-meteorology/b-atmospheric-blocking-rossby-waves.yaml"
    },
    {
      "id": "b-isotope-fractionation-kinetic-isotope-effect-transition-state",
      "title": "Isotope fractionation in geochemical systems is governed by the kinetic isotope effect (KIE) from physical chemistry: heavier isotopes have lower zero-point energies relative to the transition state, leading to slower reaction rates and measurable fractionation (δ¹³C, δ¹⁸O, δD) that geochemists use as proxy records of temperature, biological activity, and reaction mechanisms.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Transition state theory (TST) gives the rate ratio for two isotopic species: k_H/k_D = (ν_H/ν_D)·exp[-(E‡_H - E‡_D)/kT] where ν is the imaginary frequency at the TS and E‡ is the zero-point energy difference. For ¹²C vs ¹³C at the TS of carboxylation, k₁₂/k₁₃ ≈ 1.044 (fractionation factor α). The δ¹",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-proterozoic-sulfur-mif-anoxic-atmosphere-record"
      ],
      "communication_gap": "Isotope geochemists measuring δ¹³C, δ¹⁸O in sediments and organic chemists calculating kinetic isotope effects share the same Bigeleisen-Mayer equations but publish in separate journals (Geochimica et Cosmochimica Acta vs JACS); the quantitative bridge from ab initio KIE calculations to geologic proxy calibration is an active frontier with growing use of DFT in isotope geochemistry.\n",
      "translation_table": [
        {
          "field_a_term": "isotope fractionation factor α (geochemistry)",
          "field_b_term": "rate ratio k_light/k_heavy from kinetic isotope effect (chemistry)",
          "note": "α is the geochemical observable; KIE is the physical chemistry mechanism that sets α"
        },
        {
          "field_a_term": "δ¹³C value of organic matter (geochemistry)",
          "field_b_term": "integrated kinetic isotope effect over a reaction pathway (chemistry)",
          "note": "δ¹³C of -25‰ in C3 plants reflects cumulative enzymatic KIE from CO₂ fixation"
        },
        {
          "field_a_term": "paleotemperature from δ¹⁸O (geochemistry)",
          "field_b_term": "temperature-dependent equilibrium isotope fractionation (chemistry)",
          "note": "Calcite-water oxygen fractionation follows TST-derived T² dependence"
        },
        {
          "field_a_term": "mass-independent fractionation MIF (geochemistry)",
          "field_b_term": "nuclear volume effect or symmetry-dependent photochemistry (chemistry)",
          "note": "MIF (Δ¹⁷O anomalies) requires quantum tunneling or photochemical mechanisms beyond classical KIE"
        }
      ],
      "references": [
        {
          "doi": "10.1063/1.1748490",
          "note": "Bigeleisen & Mayer (1947) - calculation of equilibrium constants for isotopic exchange reactions"
        },
        {
          "doi": "10.1029/JB075i026p05209",
          "note": "O'Neil (1986) - theoretical and experimental aspects of isotopic fractionation"
        },
        {
          "doi": "10.1016/j.epsl.2005.01.015",
          "note": "Schauble (2004) - applying stable isotope fractionation theory to new systems (review)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/geochemistry-chemistry/b-isotope-fractionation-kinetic-isotope-effect-transition-state.yaml"
    },
    {
      "id": "b-silicate-weathering-geocarb-carbon-cycle",
      "title": "Silicate weathering is the dominant long-term regulator of atmospheric CO2 over geological time: the GEOCARB model formalizes this as a negative feedback where elevated CO2 warms climate, accelerating chemical weathering of Ca-Mg silicates that consumes CO2 and precipitates carbonate, controlled by reaction kinetics and thermodynamics\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Silicate weathering (e.g., CaSiO3 + CO2 → CaCO3 + SiO2) consumes atmospheric CO2 at a rate that increases with temperature and CO2 partial pressure, creating a negative feedback that stabilizes climate on 10^5 - 10^6 year timescales: higher CO2 → warming → faster weathering → more CO2 drawdown; the ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-silicate-weathering-feedback-stabilizes-hothouse"
      ],
      "communication_gap": "Geologists infer past CO2 levels from proxy data (boron isotopes, paleosols) while geochemists study weathering kinetics in laboratory flow-through reactors; quantitative calibration of field weathering rates from laboratory kinetic experiments remains problematic due to physical erosion, biological effects, and mineral surface area changes.\n",
      "translation_table": [
        {
          "field_a_term": "silicate weathering rate W (geology)",
          "field_b_term": "heterogeneous reaction rate of CO2 with silicate minerals at mineral-water interface (chemistry)",
          "note": "W follows modified Arrhenius kinetics with E_a ~ 40-60 kJ/mol; rate increases ~2× per 10°C of warming"
        },
        {
          "field_a_term": "atmospheric CO2 partial pressure pCO2 (geology)",
          "field_b_term": "reactant concentration in the carbonate equilibrium system and carbonic acid weathering agent (chemistry)",
          "note": "CO2(aq) = H2CO3 (carbonic acid) is the primary weathering agent; pCO2 controls equilibrium pH and weathering aggressiveness"
        },
        {
          "field_a_term": "carbonate-silicate cycle (geology)",
          "field_b_term": "coupled redox-neutral carbon cycle: geochemical buffer system on Myr timescales (chemistry)",
          "note": "CO2 consumed in silicate weathering is returned to atmosphere by metamorphic decarbonation of subducted carbonates"
        },
        {
          "field_a_term": "GEOCARB pCO2 reconstruction (geology)",
          "field_b_term": "forward model of atmospheric CO2 from carbon cycle mass balance equations (chemistry)",
          "note": "GEOCARB integrates weathering kinetics, volcanic outgassing, and organic carbon burial to predict Phanerozoic pCO2"
        }
      ],
      "references": [
        {
          "doi": "10.2475/ajs.292.2.81",
          "note": "Berner (1992) - GEOCARB: a model of atmospheric CO2 over Phanerozoic time"
        },
        {
          "doi": "10.1038/s41561-018-0131-5",
          "note": "Caves Rugenstein et al. (2019) - long-term paleotemperature record shows the carbon cycle controls climate"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/geochemistry-chemistry/b-silicate-weathering-geocarb-carbon-cycle.yaml"
    },
    {
      "id": "b-mineral-precipitation-ostwald-ripening",
      "title": "Mineral precipitation from supersaturated geological fluids follows Ostwald ripening dynamics — larger crystals grow at the expense of smaller ones via dissolution- reprecipitation — governed by the same Lifshitz-Slyozov-Wagner (LSW) theory used to describe coarsening in materials science, with geochemical precipitation experiments providing the most accessible natural laboratory for crystal coarsening kinetics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "When a mineral precipitates from supersaturated fluid, initial nucleation produces a polydisperse population of small crystals. Ostwald (1900) observed that this unstable size distribution coarsens over time: large crystals grow while small crystals dissolve, driven by the Gibbs-Thomson effect (high",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "",
      "translation_table": [
        {
          "field_a_term": "Gibbs-Thomson effect (excess chemical potential of small particles)",
          "field_b_term": "higher mineral solubility of small crystals in geological fluids",
          "note": "mu = mu_bulk + 2*gamma*V_m/r — smaller crystals dissolve faster, driving coarsening"
        },
        {
          "field_a_term": "LSW exponent R ~ t^{1/3}",
          "field_b_term": "mean grain size growth law in diagenetic mineral coarsening",
          "note": "The universal t^{1/3} growth law applies to geological calcite, quartz, and feldspar coarsening"
        },
        {
          "field_a_term": "universal LSW size distribution",
          "field_b_term": "self-similar crystal size distribution in late-stage diagenesis",
          "note": "Geological crystal size distributions should converge to the LSW self-similar form"
        },
        {
          "field_a_term": "interfacial energy gamma",
          "field_b_term": "mineral-fluid surface energy (controls nucleation and coarsening rate)",
          "note": "The key materials parameter — differs for each mineral-fluid system"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0022-5096(61)90018-3",
          "note": "Lifshitz & Slyozov (1961) — the kinetics of precipitation from supersaturated solid solutions"
        },
        {
          "doi": "10.1007/BF02803413",
          "note": "Wagner (1961) — theory of precipitate changes by redissolution"
        },
        {
          "doi": "10.1016/j.chemgeo.2013.02.012",
          "note": "Putnis (2015) — mineral replacement reactions in solution"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/geochemistry-materials-science/b-mineral-precipitation-ostwald-ripening.yaml"
    },
    {
      "id": "b-plate-tectonics-mantle-convection",
      "title": "Plate tectonics is driven by mantle convection — thermal convection in the viscous mantle (η ~ 10²¹ Pa·s) governed by the same Navier-Stokes equations as atmospheric and oceanic fluid dynamics, with subduction as a Rayleigh-Taylor instability and ridge spreading as upwelling convection cells.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "RAYLEIGH NUMBER CRITERION: Mantle convection occurs when the Rayleigh number exceeds the critical value:\n\n  Ra = ρgαΔTd³ / (ηκ) >> Ra_c ≈ 10³\n\nFor Earth's mantle: ρ = 3300 kg/m³, g = 9.8 m/s², α = 3×10⁻⁵ K⁻¹, ΔT = 1500 K, d = 2900 km, η = 10²¹ Pa·s, κ = 10⁻⁶ m²/s → Ra ~ 10⁷. This is far above Ra_c, ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-plate-tectonics-initiated-by-bolide-impacts",
        "h-subduction-initiation-passive-margin-collapse"
      ],
      "communication_gap": "Plate tectonics is taught as a geological observation in geology courses without deriving the fluid dynamics that drives it. Fluid dynamicists rarely see geophysics as an application domain. The connection was made by McKenzie, Parker, Morgan and Davies but remains siloed in geophysics literature.\n",
      "translation_table": [
        {
          "field_a_term": "Rayleigh number Ra (fluid dynamics)",
          "field_b_term": "convective vigour of mantle (geophysics)",
          "note": "Ra >> Ra_c means vigorous convection; Ra_c depends on boundary conditions (rigid vs. free-slip plates)"
        },
        {
          "field_a_term": "Rayleigh-Taylor instability (fluid dynamics)",
          "field_b_term": "subduction initiation (plate tectonics)",
          "note": "Sinking of cold dense lithosphere is the gravitational instability that drives plate motion"
        },
        {
          "field_a_term": "Stokes flow / creeping flow (fluid mechanics)",
          "field_b_term": "mantle viscous deformation (geophysics)",
          "note": "Reynolds number near zero; inertial terms negligible; pressure-driven Stokes equations govern"
        },
        {
          "field_a_term": "thermal boundary layer (convection theory)",
          "field_b_term": "oceanic lithosphere (geology)",
          "note": "Oceanic lithosphere IS the thermal boundary layer of mantle convection; thickness grows as sqrt(age)"
        }
      ],
      "references": [
        {
          "doi": "10.1038/216239a0",
          "note": "McKenzie & Parker (1967) Nature 216:1276 — quantitative plate tectonics"
        },
        {
          "doi": "10.1029/JZ073i006p01959",
          "note": "Morgan (1968) J Geophys Res 73:1959 — ridge-transform-trench system"
        },
        {
          "note": "Davies (1999) Dynamic Earth, Cambridge UP — mantle convection textbook"
        },
        {
          "note": "Schubert et al. (2001) Mantle Convection in the Earth and Planets, Cambridge UP"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/geology-physics/b-plate-tectonics-mantle-convection.yaml"
    },
    {
      "id": "b-earthquake-soc",
      "title": "Earthquake magnitude-frequency statistics (Gutenberg-Richter law) and aftershock decay (Omori's law) are signatures of self-organized criticality — the Earth's crust maintains itself at a critical state through slow tectonic loading and rapid stress release.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Gutenberg-Richter (GR) law, log₁₀N = a - bM (b ≈ 1), states that earthquake frequency falls as a power law with magnitude: N(M) ∝ 10^{-bM}. This is equivalent to a power-law distribution of seismic energy releases: N(E) ∝ E^{-(1+2b/3)}. Power laws without characteristic scale are the hallmark of",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-bvalue-stress-criticality-forecast"
      ],
      "communication_gap": "Geologists and seismologists are primarily empirical scientists working with catalog data; statistical physicists work with models. The SOC connection is well-established in the physics literature but treated skeptically in seismology, where predictive failure of simple SOC models has generated backlash. The debate about whether earthquakes are truly predictable (deterministic chaos near criticality) vs. intrinsically unpredictable (true SOC) is not fully resolved. Journals (Geophysical Research Letters vs. Physical Review Letters) rarely overlap.\n",
      "translation_table": [
        {
          "field_a_term": "fault network / Earth's crust",
          "field_b_term": "SOC cellular automaton lattice (spring-block model)"
        },
        {
          "field_a_term": "tectonic loading rate (slow driving)",
          "field_b_term": "slow sand grain addition in the sandpile model"
        },
        {
          "field_a_term": "earthquake (stress release event)",
          "field_b_term": "avalanche in sandpile / OFC model"
        },
        {
          "field_a_term": "Gutenberg-Richter b-value (~1)",
          "field_b_term": "avalanche size distribution exponent τ at criticality"
        },
        {
          "field_a_term": "earthquake magnitude M",
          "field_b_term": "log₁₀ of avalanche size (energy released)"
        },
        {
          "field_a_term": "Omori p-value (~1)",
          "field_b_term": "temporal autocorrelation decay exponent at critical point"
        },
        {
          "field_a_term": "aftershock sequence",
          "field_b_term": "critical relaxation following a large avalanche"
        },
        {
          "field_a_term": "mainshock rupture area",
          "field_b_term": "cluster size in the percolation problem near criticality"
        },
        {
          "field_a_term": "seismic b-value spatial variation",
          "field_b_term": "distance from SOC critical point (stress heterogeneity)"
        }
      ],
      "references": [
        {
          "note": "Gutenberg & Richter (1944) Bull Seismol Soc Am 34:185 — original GR law"
        },
        {
          "doi": "10.1103/PhysRevLett.59.381",
          "note": "Bak, Tang & Wiesenfeld (1987) Phys Rev Lett 59:381 — original SOC paper"
        },
        {
          "note": "Omori (1894) J Coll Sci Imp Univ Tokyo 7:111 — Omori's aftershock law"
        },
        {
          "doi": "10.1103/PhysRevLett.68.1244",
          "note": "Olami, Feder & Christensen (1992) Phys Rev Lett 68:1244 — OFC spring-block model"
        },
        {
          "doi": "10.1103/PhysRevLett.71.2510",
          "note": "Sornette & Sammis (1995) — critical point model of large earthquakes"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/geology-statistical-physics/b-earthquake-soc.yaml"
    },
    {
      "id": "b-adjoint-state-seismic-inversion-x-backprop-gradient-learning",
      "title": "Adjoint-state seismic inversion and neural-network backpropagation share the same reverse-mode gradient calculus.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Both full-waveform seismic inversion and deep learning compute gradients by propagating sensitivities backward through a forward model. The mapping is non-trivial because it lets geophysics borrow optimization diagnostics from ML and lets ML borrow PDE-constrained preconditioning from geophysics.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-adjoint-preconditioning-improves-seismic-inversion-convergence"
      ],
      "communication_gap": "Adjacent communities use different software stacks and validation norms, so mathematically equivalent tools are often rediscovered in parallel.",
      "translation_table": [
        {
          "field_a_term": "Forward wave simulation",
          "field_b_term": "Forward neural pass",
          "note": "Both generate state trajectories used by the reverse pass."
        },
        {
          "field_a_term": "Adjoint wavefield",
          "field_b_term": "Backpropagated error signal",
          "note": "Both are reverse-mode automatic differentiation objects."
        },
        {
          "field_a_term": "Fréchet kernel",
          "field_b_term": "Parameter gradient",
          "note": "Both encode sensitivity of loss to parameters."
        }
      ],
      "references": [
        {
          "doi": "10.1111/j.1365-246X.2005.02489.x",
          "note": "Tromp et al. (2005) adjoint-state sensitivity framework."
        },
        {
          "doi": "10.1038/323533a0",
          "note": "Rumelhart et al. (1986) backpropagation in multilayer networks."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/geophysics-computer-science/b-adjoint-state-seismic-inversion-x-backprop-gradient-learning.yaml"
    },
    {
      "id": "b-microseismic-acoustic-emission-fracture",
      "title": "Microseismic monitoring in geophysics and acoustic emission testing in materials science are the same physical phenomenon at different scales: both detect stress-wave radiation from fracture propagation, and the statistical scaling laws (Gutenberg-Richter, power-law amplitude distributions) are identical, enabling cross-scale transfer of fracture mechanics models.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Acoustic emission (AE) in materials science monitors high-frequency (10 kHz - 10 MHz) stress waves from micro-crack growth in metals, composites, and concrete. Microseismic monitoring (MS) in geophysics records low-frequency (1-1000 Hz) waves from shear slip and tensile fracture in rock masses (mine",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Materials scientists and geophysicists use different terminology (AE vs. microseismicity), different frequency ranges, and separate literature, despite analysing statistically identical phenomena. Cross-scale calibration of fracture models using lab AE data to interpret mine-scale MS is not standard practice.\n",
      "translation_table": [
        {
          "field_a_term": "b-value in Gutenberg-Richter law (geophysics)",
          "field_b_term": "acoustic emission amplitude distribution slope (materials science)",
          "note": "b ~ 1 in seismology; b ~ 1.5 in lab rock fracture; b > 1.5 near failure"
        },
        {
          "field_a_term": "seismic moment M_0 (geophysics)",
          "field_b_term": "acoustic emission energy / source moment (materials science)",
          "note": "Both scale with slip area times stress drop; same dimensional analysis"
        },
        {
          "field_a_term": "Omori aftershock sequence (geophysics)",
          "field_b_term": "AE burst clustering after stress application (materials science)",
          "note": "n(t) ~ t^{-p} with p ~ 1 in both systems; reflects crack healing and stress redistribution"
        },
        {
          "field_a_term": "failure forecasting by b-value decrease (geophysics)",
          "field_b_term": "material failure precursor by b-value drop before fracture (materials science)",
          "note": "Decreasing b signals approach to catastrophic failure in both lab specimens and rock masses"
        }
      ],
      "references": [
        {
          "doi": "10.1029/2008JB006171",
          "note": "Kwiatek et al. (2010) - acoustic emission and microseismic activity; scaling laws and source mechanisms"
        },
        {
          "doi": "10.1016/j.engfracmech.2006.01.011",
          "note": "Grosse & Ohtsu (2008) - Acoustic Emission Testing; fundamentals for engineering applications"
        },
        {
          "doi": "10.1785/0120150188",
          "note": "Lockner (1993) - role of acoustic emission in the study of rock fracture"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/geophysics-materials-science/b-microseismic-acoustic-emission-fracture.yaml"
    },
    {
      "id": "b-geomagnetic-reversal-dynamo",
      "title": "Geomagnetic field reversals are spontaneous symmetry-breaking events in Earth's geodynamo, described by low-dimensional MHD models where reversals correspond to chaotic transitions between two attractors of opposite magnetic polarity",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Earth's geomagnetic field is generated by convective flow in the outer core, modeled as a magnetohydrodynamic dynamo where the magnetic field satisfies the induction equation dB/dt = curl(v x B) + eta*nabla^2*B; reversals occur when turbulent fluctuations push the system over a separatrix between tw",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Paleomagnetists study reversal stratigraphy in sediment and volcanic records while MHD dynamicists simulate geodynamo numerically; the connection between observed reversal statistics and dynamical properties of MHD attractors requires expertise in both paleo-data analysis and nonlinear dynamics.",
      "translation_table": [
        {
          "field_a_term": "geomagnetic dipole field strength",
          "field_b_term": "order parameter of MHD dynamo attractor (dipole vs quadrupole)",
          "note": "Reversals involve weakening dipole followed by recovery in opposite polarity; analogous to symmetry breaking in bifurcation"
        },
        {
          "field_a_term": "polarity reversal duration ~ 5,000-10,000 years",
          "field_b_term": "transit time through unstable manifold between MHD attractors",
          "note": "Reversal duration reflects passage through low-field chaotic region; predictable from attractor geometry"
        },
        {
          "field_a_term": "paleomagnetic secular variation rate",
          "field_b_term": "Lyapunov exponent of geodynamo chaotic attractor",
          "note": "Unpredictability of secular variation reflects underlying chaos; reversal timing is stochastic"
        },
        {
          "field_a_term": "reversal frequency in paleomagnetic record",
          "field_b_term": "mean first-passage time between attractor basins",
          "note": "Reversals every ~200,000 yr on average; Poisson-like distribution consistent with noise-driven transitions"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1173636",
          "note": "Glatzmaier & Roberts (1995) Nature - first numerical simulation of a geomagnetic reversal"
        },
        {
          "doi": "10.1007/s11214-011-9775-x",
          "note": "Hulot et al. (2010) Space Sci Rev - geomagnetic field and secular variation review"
        },
        {
          "doi": "10.1016/j.pepi.2009.11.003",
          "note": "Olson & Amit (2014) - geomagnetic reversal mechanism in numerical dynamo models"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/geophysics-mathematics/b-geomagnetic-reversal-dynamo.yaml"
    },
    {
      "id": "b-satellite-geodesy-spherical-harmonics",
      "title": "Satellite geodesy and geoid modeling are applied spherical harmonic analysis on a rotating, oblate body — the same mathematical framework that describes the quantum mechanical hydrogen atom, and the eigenfunctions (spherical harmonics Y_lm) that solve the angular Laplace equation are the fundamental basis for representing any field on a sphere.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The geoid — the equipotential surface of Earth's gravity field — is determined by solving Laplace's equation outside a rotating body with irregular mass distribution. The solution decomposes naturally into spherical harmonics Y_lm(theta, phi), where degree l determines spatial resolution and order m",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "",
      "translation_table": [
        {
          "field_a_term": "gravitational potential V(r, theta, phi)",
          "field_b_term": "electron probability amplitude psi_{nlm}(r, theta, phi)",
          "note": "Both satisfy Laplace/Schrodinger equations with spherical harmonic angular solutions"
        },
        {
          "field_a_term": "Stokes coefficients C_lm, S_lm",
          "field_b_term": "multipole moments a_lm of the CMB temperature field",
          "note": "Different physical quantities, identical mathematical representation"
        },
        {
          "field_a_term": "truncation degree L (spatial resolution)",
          "field_b_term": "angular resolution l_max (CMB power spectrum, EM multipoles)",
          "note": "Higher L/l_max = finer spatial resolution; same Gibbs phenomenon truncation artifacts"
        },
        {
          "field_a_term": "GRACE range-rate measurement",
          "field_b_term": "inverse problem: recover C_lm from data",
          "note": "Ill-posed inversion regularized by Tikhonov or spectral methods"
        }
      ],
      "references": [
        {
          "doi": "10.1007/s00190-018-1188-5",
          "note": "Ince et al. (2019) — ICGEM calculation service for spherical harmonic gravity models"
        },
        {
          "doi": "10.1126/science.1094936",
          "note": "Tapley et al. (2004) — GRACE measurements of mass variability in the Earth system"
        },
        {
          "doi": "10.1029/2003JB002504",
          "note": "Wahr et al. (2004) - time-variable gravity from GRACE: first results"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/geophysics-mathematics/b-satellite-geodesy-spherical-harmonics.yaml"
    },
    {
      "id": "b-seismic-tomography-inverse-problems",
      "title": "Seismic tomography infers Earth's 3D velocity structure from P-wave travel times via the same Tikhonov-regularized linear inverse theory used in medical imaging and geophysical prospecting, with adjoint-state methods computing sensitivity kernels efficiently through forward + adjoint wavefield simulations.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Seismic tomography reconstructs the 3D P-wave velocity structure v(x) of Earth's interior from travel time measurements tᵢⱼ = ∫_ray ds/v(x). The ray integral is linearized about a reference model v₀(x): δtᵢⱼ = −∫_ray (δv/v₀²) ds, yielding a linear system d = Gm + ε where d is the vector of travel ti",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-seismic-adjoint-tomography-resolves-mantle-plumes"
      ],
      "communication_gap": "Inverse problem theory was developed in applied mathematics (Tikhonov 1977, Tarantola 1987) and has parallel developments in geophysics, medical imaging, and remote sensing. Seismologists developed their own regularization methods often unaware of the mathematical literature. The adjoint-state method was well-known in optimal control theory (Pontryagin, 1962) decades before Tromp et al. (2005) derived it for seismology. Journal boundaries between SIAM Journal on Applied Mathematics and Geophysical Journal International maintain the gap.\n",
      "translation_table": [
        {
          "field_a_term": "P-wave travel time residual δt",
          "field_b_term": "data vector d in linear inverse problem d = Gm + ε"
        },
        {
          "field_a_term": "3D velocity perturbation δv/v",
          "field_b_term": "model vector m to be estimated"
        },
        {
          "field_a_term": "ray-path sensitivity matrix G",
          "field_b_term": "forward operator (Radon-like transform)",
          "note": "G is sparse — each ray samples only a subset of model cells"
        },
        {
          "field_a_term": "Tikhonov regularization α||Lm||²",
          "field_b_term": "prior information / smoothness constraint on model",
          "note": "Equivalent to Bayesian inference with Gaussian prior precision α²LᵀL"
        },
        {
          "field_a_term": "checkerboard test",
          "field_b_term": "point spread function / resolution matrix R = G†G",
          "note": "Recovered checkerboard reveals where R ≈ I (well-resolved) vs. R ≈ 0 (null space)"
        },
        {
          "field_a_term": "adjoint-state sensitivity kernel K(x)",
          "field_b_term": "Fréchet derivative of misfit with respect to model parameter",
          "note": "Computed by cross-correlating forward and adjoint wavefields — O(1) backward pass"
        },
        {
          "field_a_term": "full waveform inversion (FWI)",
          "field_b_term": "nonlinear iterative gradient descent on waveform misfit",
          "note": "Same structure as neural network backpropagation — gradient via adjoint method"
        }
      ],
      "references": [
        {
          "doi": "10.1029/JB082i002p00239",
          "note": "Dziewonski et al. (1977) JGR 82:239 — first global P-wave tomography"
        },
        {
          "note": "Tikhonov & Arsenin (1977) Solutions of Ill-Posed Problems. V.H. Winston & Sons."
        },
        {
          "doi": "10.1111/j.1365-246X.2005.02489.x",
          "note": "Tromp, Tape & Liu (2005) Geophys J Int 160:195 — adjoint-state sensitivity kernels for seismology"
        },
        {
          "doi": "10.1016/j.earscirev.2012.02.010",
          "note": "Liu & Gu (2012) Earth Sci Rev 112:25 — seismic tomography review"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/geophysics-mathematics/b-seismic-tomography-inverse-problems.yaml"
    },
    {
      "id": "b-tectonic-stress-coulomb-failure",
      "title": "Tectonic stress transfer is quantified by the Coulomb failure function: ΔCFF = Δτ + μ(Δσₙ + ΔP) predicts aftershock locations with ~70% accuracy",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Coulomb failure function ΔCFF = Δτ + μ(Δσₙ + ΔP) encodes how a mainshock redistributes stress on surrounding fault planes: Δτ is the change in shear stress resolved onto the receiver fault, Δσₙ is the change in normal stress (positive = unclamping), ΔP is pore pressure change, and μ is the effec",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-tectonic-stress-coulomb-failure"
      ],
      "communication_gap": "Seismologists routinely compute Coulomb stress maps but the connection to continuum mechanics (Eshelby inclusion theory, fracture mechanics stress intensity factors) is underexploited. Engineering mechanics literature on crack interactions uses the same elasticity theory but different notation and rarely cites geophysical aftershock literature, creating parallel developments in fault interaction models vs. crack shielding theory.\n",
      "translation_table": [
        {
          "field_a_term": "fault slip during earthquake (seismic moment tensor)",
          "field_b_term": "displacement discontinuity boundary condition in elastic half-space",
          "note": "Okada (1992) formulas give the full stress tensor change Δσᵢⱼ from rectangular fault slip"
        },
        {
          "field_a_term": "aftershock zone geometry",
          "field_b_term": "positive ΔCFF lobe in the Coulomb stress map",
          "note": "Statistical tests show aftershock densities correlate strongly with ΔCFF > 0.1 bar"
        },
        {
          "field_a_term": "fault friction coefficient μ",
          "field_b_term": "slope of the Mohr-Coulomb failure envelope in shear vs normal stress space",
          "note": "μ~0.6 (Byerlee's law for most rocks) sets the critical stress ratio"
        },
        {
          "field_a_term": "seismic quiescence (stress shadow)",
          "field_b_term": "negative ΔCFF region inhibiting fault reactivation",
          "note": "ΔCFF < 0 moves faults away from failure; quiescence observed in M≥3 catalogs"
        }
      ],
      "references": [
        {
          "doi": "10.1029/92JB00182",
          "note": "Okada (1992) Internal deformation due to shear and tensile faults in a half-space. Bull Seism Soc Am 82:1018"
        },
        {
          "doi": "10.1038/373612a0",
          "note": "King et al. (1994) Static stress changes and the triggering of earthquakes. Bull Seism Soc Am 84:935"
        },
        {
          "doi": "10.1785/0120060211",
          "note": "Stein (1999) The role of stress transfer in earthquake occurrence. Nature 402:605"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/geophysics-mathematics/b-tectonic-stress-coulomb-failure.yaml"
    },
    {
      "id": "b-kriging-geostatistics",
      "title": "Kriging / geostatistics ↔ Gaussian process regression — optimal spatial interpolation as machine learning",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Kriging (Krige 1951, formalised by Matheron 1963) is the minimum-variance linear unbiased estimator for spatially correlated data: Ẑ(x₀) = Σᵢ λᵢZ(xᵢ), where the optimal weights λᵢ are determined by solving the kriging system derived from the variogram γ(h) = ½E[Z(x+h)−Z(x)]². The variogram quantifie",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-geostatistical-kriging-as-optimal-climate-downscaling"
      ],
      "communication_gap": "Matheron's geostatistics school (Paris) published in French mining engineering journals inaccessible to English-speaking statisticians. Gaussian process regression entered machine learning through O'Hagan's Bayesian statistics (1970s) and Rasmussen's computational work (2006) with no awareness of Krige or Matheron. The formal equivalence was established in scattered statistics papers (Cressie 1990, Stein 1999) but has not fully propagated to either community. Software ecosystems remain separate (GSLIB vs GPy/GPflow).\n",
      "translation_table": [
        {
          "field_a_term": "variogram γ(h) = ½Var[Z(x+h)−Z(x)]",
          "field_b_term": "GP covariance kernel k(x,x') = Cov[f(x),f(x')]",
          "note": "Stationary kernel: γ(h) = σ² − k(h); Matérn, spherical, exponential kernels all have variogram equivalents"
        },
        {
          "field_a_term": "kriging system (matrix equation for weights λᵢ)",
          "field_b_term": "GP posterior mean (kernel matrix inversion)",
          "note": "Algebraically identical systems of equations; differ only in notation and derivation route"
        },
        {
          "field_a_term": "kriging variance σ²_K(x₀)",
          "field_b_term": "GP posterior variance σ²_GP(x₀)",
          "note": "Both quantify prediction uncertainty; equivalent under the stationarity assumption"
        },
        {
          "field_a_term": "nugget effect (variance at zero lag)",
          "field_b_term": "observation noise variance σ²_n",
          "note": "Both represent measurement error plus micro-scale variability below the sampling scale"
        },
        {
          "field_a_term": "sill (variogram asymptote)",
          "field_b_term": "GP prior variance σ²_f",
          "note": "The asymptotic variance at large lag — the prior signal variance in GP language"
        },
        {
          "field_a_term": "range (lag at which variogram reaches sill)",
          "field_b_term": "lengthscale ℓ (kernel bandwidth)",
          "note": "Controls the spatial correlation distance; equivalent to kernel lengthscale"
        }
      ],
      "references": [
        {
          "note": "Krige (1951) J S Afr Inst Min Metall 52:119 — original kriging for gold ore estimation",
          "url": "https://journals.co.za/doi/10.10520/AJA0038223X_4765"
        },
        {
          "note": "Matheron (1963) Econ Geol 58:1246 — formal theory of regionalised variables",
          "doi": "10.2113/gsecongeo.58.8.1246"
        },
        {
          "note": "Journel & Huijbregts (1978) Mining Geostatistics. Academic Press.",
          "url": "https://www.sciencedirect.com/book/9780123910507"
        },
        {
          "note": "Rasmussen & Williams (2006) Gaussian Processes for Machine Learning. MIT Press.",
          "url": "http://www.gaussianprocess.org/gpml/"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/geophysics-statistics/b-kriging-geostatistics.yaml"
    },
    {
      "id": "b-earthquake-early-warning-x-recursive-bayesian-source-estimation",
      "title": "Earthquake early warning systems fuse sparse P-wave arrivals into evolving magnitude and location estimates before destructive S-waves arrive — the operational backbone is recursive Bayesian / Kalman-style updating of seismic source parameters under latency constraints (seismology ↔ estimation theory).\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "EEW pipelines ingest triggers from dense networks, invert for centroid stress drop proxies and magnitude as data arrive; early magnitude estimates have large variance that contracts as more stations contribute—behavior formally analogous to filtering states x_{t+1} = F(x_t) + w with observations y_t",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-eew-kalman-style-updates-tighten-magnitude-posterior-faster-with-dense-networks"
      ],
      "communication_gap": "Seismology operational documents describe “real-time source determination” with center-specific names; control engineers recognize Kalman structure but may not know earth structure limitations (3D Green’s function errors) that dominate “process noise” in EEW.\n",
      "translation_table": [
        {
          "field_a_term": "P-wave hypocenter pick latency Δt",
          "field_b_term": "filter initialization delay before first credible posterior on magnitude M",
          "note": "Blind zones near the epicenter reflect insufficient geometric aperture — analogous to unobservable partial states."
        },
        {
          "field_a_term": "Virtual seismogram stacks / finite-fault back-projection (as more data accrue)",
          "field_b_term": "innovation updates shrinking posterior covariance",
          "note": "Not a linear-Gaussian fault model in practice, but the information accrual pattern matches filtering."
        },
        {
          "field_a_term": "false/missed alert costs",
          "field_b_term": "decision-theoretic loss outside pure L2 estimation (ties to separate hypothesis testing bridge)",
          "note": "EEW is not only estimation; public alerting injects social loss functions."
        }
      ],
      "references": [
        {
          "doi": "10.1146/annurev.earth.031208.100055",
          "note": "Allen et al. (2009) — earthquake early warning systems (Annu. Rev. Earth Planet. Sci.)."
        },
        {
          "doi": "10.1115/1.3662552",
          "note": "Kalman (1960) — linear filtering update algebra baseline (ASME J. Basic Eng.)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/geoscience-engineering/b-earthquake-early-warning-x-recursive-bayesian-source-estimation.yaml"
    },
    {
      "id": "b-kalman-state-estimation-x-nwp-data-assimilation",
      "title": "Kalman filtering / Kalman–Bucy smoothing ↔ operational data assimilation in numerical weather prediction (estimation theory ↔ geoscience engineering)\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Numerical weather prediction centers fuse observations with model trajectories using variants of Kalman filtering: extended Kalman filters historically, ensemble Kalman filters (EnKF) and four-dimensional variational assimilation (4D-Var) operationally. The shared backbone is recursive Bayesian upda",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-adaptive-inflation-ensemble-kalman-corrects-extreme-events"
      ],
      "communication_gap": "Control textbooks teach low-dimensional LQG; NWP assimilation papers emphasize HPC and geophysical constraints. Students rarely see that EnKF is structurally a Monte Carlo Kalman update.\n",
      "translation_table": [
        {
          "field_a_term": "Kalman gain K = P H^T (H P H^T + R)^{-1}",
          "field_b_term": "assimilation increment weights in EnKF/4D-Var implementations",
          "note": "Same update algebra skeleton under Gaussian assumptions."
        },
        {
          "field_a_term": "process noise covariance Q",
          "field_b_term": "model error and stochastic parameterizations in NWP",
          "note": "Tuned empirically with huge state vectors."
        },
        {
          "field_a_term": "innovation y - H x",
          "field_b_term": "observation minus background in operational diagnostics",
          "note": "Monitoring divergence drives cycling QC."
        }
      ],
      "references": [
        {
          "doi": "10.1115/1.3662552",
          "note": "Kalman (1960) — linear filtering and prediction (ASME J. Basic Eng.)."
        },
        {
          "doi": "10.1175/1520-0469(1994)051<1747:ROEOIA>2.0.CO;2",
          "note": "Lorenc (1994) — development of operational assimilation methods (J. Atmos. Sci.)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/geoscience-engineering/b-kalman-state-estimation-x-nwp-data-assimilation.yaml"
    },
    {
      "id": "b-plate-boundary-slip-x-fracture-mechanics",
      "title": "Lithospheric plate boundaries concentrate shear and unlock episodic slip — earthquakes — mirroring crack-tip stress intensities and fracture toughness concepts in engineering fracture mechanics where strain energy release rates govern unstable crack growth when loading exceeds critical stress intensity K_IC.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Elastic rebound theory treats faults as planar shear cracks storing elastic strain energy released during rupture. Linear elastic fracture mechanics defines mode-II/III stress intensity factors K at crack tips controlling propagation thresholds — analogous roles for asperity-breaking on faults thoug",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-plate-boundary-slip-x-fracture-mechanics"
      ],
      "communication_gap": "Earthquake physics curricula emphasize elastodynamics and friction empirical laws but sometimes omit canonical fracture-mechanics notation (K, J-integral) used in aerospace and civil inspection standards.\n",
      "translation_table": [
        {
          "field_a_term": "Fault slip δ and rupture area A (seismic moment M0 = μ A δ)",
          "field_b_term": "Crack opening/slide area and displacement in fracture experiments",
          "note": "Same bilinear moment scaling with shear modulus μ parallels stiffness-controlled energy release."
        },
        {
          "field_a_term": "Stress drop Δσ on faults",
          "field_b_term": "Critical stress intensity before arrest / dynamic overshoot in cracks",
          "note": "Both quantify excess stress relieved during catastrophic slip events."
        },
        {
          "field_a_term": "Frictional cohesive zone (rate-and-state)",
          "field_b_term": "Cohesive zone models (Dugdale–Barenblatt) bridging linear fracture tips",
          "note": "Engineers regularize singular crack tips; geophysicists regularize with friction laws."
        }
      ],
      "references": [
        {
          "doi": "10.1029/JB074i008p02153",
          "note": "Brune (1970) — seismic source spectra tied to stress drop scaling"
        },
        {
          "doi": "10.1016/0148-9062(94)90296-S",
          "note": "Scholz (textbook tradition) rock friction and fracture — representative chapter DOI cluster"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/geoscience-engineering/b-plate-boundary-slip-x-fracture-mechanics.yaml"
    },
    {
      "id": "b-tsunami-shallow-water-x-dispersive-soliton-bore",
      "title": "Long-wavelength tsunami propagation over varying depth is commonly modeled with shallow-water equations whose nonlinear and dispersive corrections predict bore formation, shock-like steepening, and — in idealized integrable limits — solitary-wave solutions resembling solitons, though real ocean tsunamis span rupture complexity, bathymetry focusing, and dissipation beyond textbook KdV universality.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Linear shallow-water theory explains propagation speeds c = √(g h) and teleseismic arrival ordering; nonlinearity steepens wave fronts into bores when dispersion is weak. Weakly nonlinear dispersive models (Boussinesq / KdV limits) connect to soliton mathematics for idealized initial disturbances — ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-tsunami-front-regime-classifier-nonlinear-dispersive-bore"
      ],
      "communication_gap": "Tsunami warnings emphasize seismic sources and inundation mapping while mathematical physics courses emphasize integrable models — public-facing soliton metaphors often omit bore formation and bathymetry scattering that dominate coastal observations.\n",
      "translation_table": [
        {
          "field_a_term": "shallow-water phase speed √(g h)",
          "field_b_term": "leading-edge arrival time constraints for long waves",
          "note": "Cornerstone shared prediction across tsunami engineering and theory."
        },
        {
          "field_a_term": "bore / hydraulic jump structure in NS shallow-water limits",
          "field_b_term": "steep arriving wave fronts with embedded turbulence and breaking",
          "note": "Breaks clean soliton picture — labeled caution for metaphor-heavy explanations."
        },
        {
          "field_a_term": "KdV soliton solution under balance of nonlinearity and dispersion",
          "field_b_term": "rare idealized laboratory tsunami analogs and stylized initial-value problems",
          "note": "Pedagogical bridge — not a blanket claim for megathrust-generated tsunamis."
        }
      ],
      "references": [
        {
          "doi": "10.1017/S0022112087000398",
          "note": "Synolakis (1987) — run-up of solitary waves (J Fluid Mech) — canonical shallow-water soliton/bore discussion lineage."
        },
        {
          "doi": "10.1016/j.wavemoti.2010.04.002",
          "note": "Craig et al. (2010) — Hamiltonian long-wave modulation theory illustrating dispersive/nonlinear balances relevant beyond purely hydrostatic shallow-water limits (Wave Motion)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/geoscience-fluid-mechanics/b-tsunami-shallow-water-x-dispersive-soliton-bore.yaml"
    },
    {
      "id": "b-unet-x-satellite-flood-extent-mapping",
      "title": "U-Net segmentation bridges biomedical pixel-wise inference and satellite flood-extent mapping under cloud and sensor noise.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy (to be empirically validated): encoder-decoder skip architectures developed for biomedical segmentation transfer to flood delineation by preserving fine boundary detail while integrating broad hydrologic context.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-unet-domain-randomization-improves-flood-mapping-recall"
      ],
      "communication_gap": "Remote-sensing operations emphasize geospatial reliability and update latency, while ML studies often report static benchmark accuracy only.",
      "translation_table": [
        {
          "field_a_term": "skip connection",
          "field_b_term": "high-resolution flood boundary retention",
          "note": "Skip paths preserve local edge detail."
        },
        {
          "field_a_term": "context encoder",
          "field_b_term": "catchment-scale context extraction",
          "note": "Deep layers capture broad spatial dependencies."
        },
        {
          "field_a_term": "pixel-wise supervision",
          "field_b_term": "binary inundation mask learning",
          "note": "Both tasks optimize dense segmentation labels."
        }
      ],
      "references": [
        {
          "arxiv": "1505.04597",
          "note": "U-Net architecture."
        },
        {
          "url": "https://www.nasa.gov/mission_pages/flooding/index.html",
          "note": "Satellite flood monitoring context."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/geoscience-machine-learning/b-unet-x-satellite-flood-extent-mapping.yaml"
    },
    {
      "id": "b-biogeochemical-box-models-x-attractor-stability",
      "title": "Coupled ocean–atmosphere–sediment box models of carbon, nitrogen, and phosphorus cycles can exhibit multiple stable steady states (or slow manifolds) when nonlinear uptake kinetics and burial feedbacks combine — mapping onto finite-dimensional dynamical systems attractors and bifurcation diagrams used in mathematical ecology and climate theory despite vastly slower timescales.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Compartmental ODE models aggregate reservoirs (surface ocean, deep ocean, atmosphere, lithosphere) with flux arrows forming directed graphs — Jacobian spectra determine linear stability of steady states — identical formalism to predator–prey or climate energy-balance models exhibiting saddle-node bi",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-biogeochemical-box-models-x-attractor-stability"
      ],
      "communication_gap": "Biogeochemistry field programs emphasize geochemical tracers and isotopic budgets while dynamical systems seminars emphasize bifurcation proofs — integrated textbooks crossing both remain sparse relative to climate tipping-point surveys.\n",
      "translation_table": [
        {
          "field_a_term": "Reservoir mass M_i (Pg C etc.)",
          "field_b_term": "State vector components x_i in compartmental ODEs x' = f(x)",
          "note": "Same Cauchy problem setup as canonical coupled compartment pharmacokinetics."
        },
        {
          "field_a_term": "Flux laws (Michaelis–Menten export production)",
          "field_b_term": "Nonlinear source terms creating multiple equilibria in autonomous ODEs",
          "note": "Positive feedback curvature yields bifurcations analogous to lake eutrophication models."
        },
        {
          "field_a_term": "Sedimentary burial closure",
          "field_b_term": "Dissipative removal terms shrinking admissible invariant sets",
          "note": "Provides asymptotic stability toward attracting manifolds when parameters permit."
        }
      ],
      "references": [
        {
          "doi": "10.1029/2001GB001398",
          "note": "Archer (2002) Global Biogeochemical Cycles — marine carbon cycle box-model lineage context"
        },
        {
          "doi": "10.1175/1520-0469(1969)026<0561:TTACOT>2.0.CO;2",
          "note": "Sellers (1969) JAS — simple climate balance model exemplifying global compartment dynamics"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/geoscience-mathematics/b-biogeochemical-box-models-x-attractor-stability.yaml"
    },
    {
      "id": "b-coastal-erosion-x-diffusive-interface",
      "title": "Large-scale coastline shapes and shoreline erosion fronts can be modeled using interface dynamics and diffusive / reaction–diffusion ideas familiar from mathematical physics.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Coastal profiles evolve under wave forcing, sediment transport, and sea-level rise. Reduced models treat the shoreline as a moving curve whose normal velocity depends on local curvature, fluxes, and noise — analogous to interface problems in phase field models where an order parameter diffuses and s",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-diffusive-interface-models-predict-shoreline-roughening-exponents"
      ],
      "communication_gap": "Coastal engineering often uses empirical Bruun-rule style heuristics; PDE interface community uses different validation metrics, limiting shared benchmarks.",
      "translation_table": [
        {
          "field_a_term": "interface normal velocity law",
          "field_b_term": "alongshore sediment flux divergence"
        },
        {
          "field_a_term": "surface tension–like regularization",
          "field_b_term": "smoothing by wave-driven diffusive transport"
        },
        {
          "field_a_term": "noise-driven roughening",
          "field_b_term": "storm variability and discrete slump events"
        }
      ],
      "references": [
        {
          "doi": "10.1038/35104541",
          "note": "Ashton, Murray & Arnoult (2001) — shoreline instability / high-angle wave approach (interface flavor)"
        },
        {
          "doi": "10.1029/2006JF000542",
          "note": "Valvo, Murray & Ashton (2007) — long-term shoreline modeling and statistics (follow-on bridge)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/geoscience-mathematics/b-coastal-erosion-x-diffusive-interface.yaml"
    },
    {
      "id": "b-ice-core-proxy-inverse-methods",
      "title": "Ice core paleoclimatology is an applied inverse problem: chemical and isotopic proxies (delta-18O, dust, CO2, CH4) encode past climate states in a noisy, non-linear forward model, and reconstructing the underlying temperature history requires the same Bayesian inversion, regularisation, and uncertainty quantification methods used in geophysical tomography and medical imaging.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Ice cores archive past atmospheric composition and temperature through physical and chemical fractionation processes. The stable isotope ratio delta-18O records condensation temperature via the Rayleigh fractionation forward model, while gas bubbles trap ancient air. Reconstructing the continuous te",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "",
      "translation_table": [
        {
          "field_a_term": "forward model g(m) = d",
          "field_b_term": "climate-to-proxy relationship (e.g., Rayleigh fractionation)",
          "note": "Maps the model parameters (temperature history) to observables (delta-18O)"
        },
        {
          "field_a_term": "regularisation parameter lambda",
          "field_b_term": "smoothness prior on climate variability",
          "note": "Too small = noisy reconstruction; too large = over-smoothed — same Tikhonov trade-off"
        },
        {
          "field_a_term": "model covariance matrix C_m",
          "field_b_term": "prior probability distribution on climate states",
          "note": "Encodes knowledge of typical climate variability spectra before seeing the data"
        },
        {
          "field_a_term": "data kernel / sensitivity matrix G",
          "field_b_term": "proxy sensitivity to climate variables",
          "note": "Quantifies how much each measurement constrains each time window of climate"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature02599",
          "note": "EPICA members (2004) — Eight glacial cycles from an Antarctic ice core (Nature)"
        },
        {
          "doi": "10.1029/2011RG000384",
          "note": "Tingley & Huybers (2010) — A Bayesian algorithm for reconstructing climate anomalies in space and time"
        },
        {
          "doi": "10.1175/2009JCLI3015.1",
          "note": "Mann et al. (2009) — Atlantic hurricanes and climate over the past 1500 years"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/geoscience-mathematics/b-ice-core-proxy-inverse-methods.yaml"
    },
    {
      "id": "b-nitrogen-cycle-reservoirs-x-coupled-oscillator-stability",
      "title": "Coupled reservoir models of the global nitrogen cycle link atmosphere, land, and ocean pools through fixation, nitrification/denitrification, and export production — under perturbation their linearized Jacobian spectra resemble stability analysis of coupled damped oscillators, clarifying when anthropogenic forcing yields monotonic relaxation versus transient oscillatory nutrient anomalies.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Box-and-arrow budgets produce nonlinear ODEs whose Jacobian eigenvalues determine damping versus oscillatory approaches to steady states — qualitatively analogous to mass–spring–damper networks — yet microbial trait diversity, sub-grid heterogeneity, and climate feedbacks break low-dimensional oscil",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-linearized-n-cycle-models-predict-chlorophyll-mode-timescales"
      ],
      "communication_gap": "Biogeochemistry field campaigns emphasize flux measurements while dynamical-systems courses illustrate eigenmode pictures on abstract graphs — graduate coupling remains patchy outside Earth-system modeling groups.\n",
      "translation_table": [
        {
          "field_a_term": "Jacobian eigenvalues of coupled N reservoir system (timescale separation)",
          "field_b_term": "normal modes of coupled oscillators / linear stability eigenmodes",
          "note": "Same spectral stability analysis machinery applies after linearization."
        },
        {
          "field_a_term": "residence times τ_i in individual pools",
          "field_b_term": "inverse damping coefficients in oscillator chains",
          "note": "Helpful mnemonic; nonlinear couplings differ beyond diagonal dominance assumptions."
        },
        {
          "field_a_term": "Hopf-type emergence of oscillatory chlorophyll anomalies in coupled climate–ecosystem models",
          "field_b_term": "limit cycles in weakly nonlinear oscillator networks",
          "note": "Speculative mapping flagged — Earth-system models may exhibit Hopf bifurcations under parameter shifts."
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature06592",
          "note": "Gruber & Galloway (2008) — Earth-system perspective on global nitrogen cycle (Nature)"
        },
        {
          "doi": "10.1073/pnas.0705414105",
          "note": "Lenton et al. (2008) — tipping elements in the Earth system (coupled dynamics framing)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/geoscience-mathematics/b-nitrogen-cycle-reservoirs-x-coupled-oscillator-stability.yaml"
    },
    {
      "id": "b-plate-tectonics-topology",
      "title": "Plate tectonic motion on Earth's surface is an exact realisation of the mathematical theory of rigid motions on a sphere: every plate motion is a rotation in SO(3) about an Euler pole, hotspot tracks are geodesics on the rotation manifold, and triple junction stability obeys the Euler characteristic constraint of the 2-sphere.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Euler's fixed-point theorem (1776) states that every orientation- preserving rigid motion of the 2-sphere S² is a rotation about some axis passing through the centre — the Euler pole. McKenzie & Parker (1967) and Morgan (1968) independently recognised that this theorem completely describes plate tec",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-plate-circuit-closure-so3-constraint"
      ],
      "communication_gap": "The application of Euler's theorem to tectonics was discovered by geophysicists (McKenzie & Parker 1967, Morgan 1968) who were aware of the theorem but not of the deeper Lie group and fiber bundle structure. Differential geometers and topologists who develop the theory of SO(3) bundles, Lie group statistics, and holonomy are unaware of the tectonic application. Geoscience education introduces Euler poles as a practical tool without the full mathematical context, while pure mathematics courses on Lie groups never mention tectonics.\n",
      "translation_table": [
        {
          "field_a_term": "Euler pole (fixed point of rotation on sphere)",
          "field_b_term": "Angular velocity vector ω of a tectonic plate",
          "note": "The direction of ω gives the Euler pole coordinates; |ω| gives rotation rate in °/Myr"
        },
        {
          "field_a_term": "Rotation group SO(3)",
          "field_b_term": "Space of all tectonic plate configurations on the globe",
          "note": "Each plate's relative orientation is an element of SO(3); composition = plate circuit closure"
        },
        {
          "field_a_term": "Small circle about the rotation axis on S²",
          "field_b_term": "Transform fault trace on the ocean floor",
          "note": "Transform faults follow small circles centred on the Euler pole — invariant curves of the rotation, not great-circle geodesics"
        },
        {
          "field_a_term": "Euler characteristic χ = V - E + F = 2 for S²",
          "field_b_term": "Constraint on the global plate boundary topology",
          "note": "Limits the possible numbers and types of triple junctions on Earth's surface"
        },
        {
          "field_a_term": "Lie algebra so(3) (infinitesimal rotations)",
          "field_b_term": "Instantaneous angular velocity of plate motion",
          "note": "The angular velocity ω is an element of the Lie algebra; integration gives finite rotation"
        },
        {
          "field_a_term": "Principal fiber bundle over base manifold",
          "field_b_term": "Full kinematic state of the tectonic plate system over geological time",
          "note": "Plate topology changes (subduction, ridge jumps) are topological defects in this bundle"
        },
        {
          "field_a_term": "Parallel transport on SO(3)",
          "field_b_term": "Finite rotation reconstruction from stage poles",
          "note": "Reconstructing past plate positions = parallel-transporting the rotation along the SO(3) geodesic"
        }
      ],
      "references": [
        {
          "doi": "10.1038/2161276a0",
          "note": "McKenzie & Parker (1967) Nature 216:1276 — The North Pacific: an example of tectonics on a sphere"
        },
        {
          "doi": "10.1029/JB073i006p01959",
          "note": "Morgan (1968) J Geophys Res 73:1959 — Rises, trenches, great faults and crustal blocks; Euler pole kinematics"
        },
        {
          "note": "Euler (1776) Novi Commentarii Academiae Scientiarum Petropolitanae 20:189 — Formulae generales pro translatione quacunque corporum rigidorum",
          "url": "https://scholarlycommons.pacific.edu/euler-works/478/"
        },
        {
          "note": "Cox & Hart (1986) Plate Tectonics: How It Works (Blackwell) — textbook treatment of SO(3) kinematics",
          "url": "https://www.wiley.com/en-us/Plate+Tectonics%3A+How+It+Works-p-9780865420045"
        },
        {
          "doi": "10.1038/224125a0",
          "note": "McKenzie & Morgan (1969) Nature 224:125 — Evolution of triple junctions; topological stability analysis"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/geoscience-mathematics/b-plate-tectonics-topology.yaml"
    },
    {
      "id": "b-soil-aggregate-fractal-pore-stability",
      "title": "Soil aggregate stability and water retention are governed by fractal pore-size distributions: the mass fractal dimension D_f of soil aggregates predicts hydraulic conductivity, air-entry pressure, and resistance to disruption, unifying soil physics and fractal geometry through a single structural parameter measurable by mercury intrusion porosimetry.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Mandelbrot's fractal geometry provides a quantitative framework for the irregular, scale-invariant structure of soil aggregates. The cumulative pore-size distribution N(r > R) ~ R^{-D_f} (D_f ~ 2.6-3.0 for typical agricultural soils) implies the fractal pore-volume distribution V(r > R) ~ R^{3-D_f},",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Soil scientists use fractal terminology (fractal dimension) without applying the full mathematical toolkit (multifractal analysis, lacunarity, random fractal models); applied mathematicians studying fractal geometry rarely work with soil physical data. Standardised measurement protocols for soil fractal dimension are not established.\n",
      "translation_table": [
        {
          "field_a_term": "mass fractal dimension D_f (mathematics)",
          "field_b_term": "soil aggregate structural complexity (soil science)",
          "note": "D_f measured by log-log slope of N(r>R) from mercury intrusion or image analysis"
        },
        {
          "field_a_term": "fractal surface area ~ R^{2-D_f} (mathematics)",
          "field_b_term": "clay-humus bonding surface in soil aggregates (soil science)",
          "note": "Higher D_f implies more interfacial area per unit volume - stronger aggregate bonding"
        },
        {
          "field_a_term": "lacunarity (fractal texture measure) (mathematics)",
          "field_b_term": "soil pore uniformity / heterogeneity (soil science)",
          "note": "High lacunarity indicates clustered pores; linked to preferential flow and reduced water use efficiency"
        },
        {
          "field_a_term": "multifractal spectrum f(alpha) (mathematics)",
          "field_b_term": "spatial variability of soil hydraulic properties across scales (soil science)",
          "note": "Multifractal analysis captures non-uniform scaling of saturated conductivity"
        }
      ],
      "references": [
        {
          "doi": "10.1097/00010694-199102000-00005",
          "note": "Tyler & Wheatcraft (1990) - fractal scaling of soil particle-size distributions"
        },
        {
          "doi": "10.1016/S0016-7061(99)00091-7",
          "note": "Gimenez et al. (1997) - fractal models for predicting soil hydraulic properties"
        },
        {
          "doi": "10.2136/sssaj2004.0583",
          "note": "Perrier et al. (1999) - new models of fractal soil structure and transport"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/geoscience-mathematics/b-soil-aggregate-fractal-pore-stability.yaml"
    },
    {
      "id": "b-eikonal-wavefronts-x-cardiac-activation-mapping",
      "title": "Eikonal wavefront equations unify seismic travel-time inversion and cardiac activation-time mapping.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Eikonal wavefront equations unify seismic travel-time inversion and cardiac activation-time mapping.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-eikonal-regularized-inversion-improves-cardiac-activation-map-fidelity"
      ],
      "communication_gap": "The two communities use different notation, benchmarks, and publication venues, which obscures transferable mathematical structure.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1190/1.1444608",
          "note": "Finite-difference travel times and eikonal inversion."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/geoscience-medicine/b-eikonal-wavefronts-x-cardiac-activation-mapping.yaml"
    },
    {
      "id": "b-ensemble-kalman-smoothing-x-icu-latent-state-estimation",
      "title": "Ensemble Kalman smoothing links weather data assimilation and ICU latent-state tracking in physiological digital twins.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Operational weather systems and ICU physiology models both require sequential state correction under partial noisy observations. Ensemble Kalman smoothing translates directly as a practical uncertainty-aware update strategy.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-localized-enkf-reduces-icu-forecast-error"
      ],
      "communication_gap": "Adjacent communities use different software stacks and validation norms, so mathematically equivalent tools are often rediscovered in parallel.",
      "translation_table": [
        {
          "field_a_term": "Forecast ensemble",
          "field_b_term": "Physiologic trajectory ensemble",
          "note": "Both approximate uncertainty over latent state evolution."
        },
        {
          "field_a_term": "Localization",
          "field_b_term": "Compartment-specific coupling limits",
          "note": "Suppresses spurious long-range covariance artifacts."
        },
        {
          "field_a_term": "Innovation",
          "field_b_term": "Observed-minus-predicted biomarker residual",
          "note": "Drives update step and model diagnostics."
        }
      ],
      "references": [
        {
          "doi": "10.1115/1.3662552",
          "note": "Kalman (1960) linear filtering and prediction."
        },
        {
          "doi": "10.1175/1520-0469(1994)051<1747:ROEOIA>2.0.CO;2",
          "note": "Lorenc (1994) data assimilation foundations."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/geoscience-medicine/b-ensemble-kalman-smoothing-x-icu-latent-state-estimation.yaml"
    },
    {
      "id": "b-ensemble-smoother-x-precision-oncology-state-estimation",
      "title": "Ensemble smoothing from geoscience data assimilation transfers to latent-state estimation in precision oncology.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Ensemble smoothing from geoscience data assimilation transfers to latent-state estimation in precision oncology.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ensemble-smoothers-improve-precision-oncology-trajectory-calibration"
      ],
      "communication_gap": "The two communities use different notation, benchmarks, and publication venues, which obscures transferable mathematical structure.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1175/1520-0493(2001)129<2776:AENRFA>2.0.CO;2",
          "note": "Ensemble assimilation foundations."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/geoscience-medicine/b-ensemble-smoother-x-precision-oncology-state-estimation.yaml"
    },
    {
      "id": "b-earthquake-self-organized-criticality",
      "title": "The Gutenberg-Richter and Omori laws are empirical signatures of self-organized criticality: fault networks spontaneously evolve to the critical point of the BTW sandpile universality class, unifying earthquake statistics with statistical physics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Gutenberg-Richter law (log N(M) = a - bM, empirical b ≈ 1 globally) states that the number of earthquakes of magnitude M decreases as a power law: N(M) ~ 10^{-bM}, or equivalently the seismic energy E scales as P(E) ~ E^{-τ} with τ ≈ 1.67. Omori's aftershock decay law states that the rate of aft",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-gutenberg-richter-soc-btw-exponent",
        "h-gutenberg-richter-percolation-threshold"
      ],
      "communication_gap": "Bak et al. published SOC in Physical Review Letters (1987) for physicists. Seismologists had Gutenberg-Richter since 1944 and Omori since 1894. Bak & Tang (1989) explicitly connected the two in Journal of Geophysical Research, but the connection remained peripheral to mainstream seismology, which uses empirical scaling laws without invoking criticality theory. The barrier is disciplinary: SOC language (universality classes, order parameters, renormalization group) is foreign to most geophysicists, who prefer phenomenological regression models. Cross-department training and joint publications are rare.\n",
      "translation_table": [
        {
          "field_a_term": "Fault network stress accumulation (tectonic loading)",
          "field_b_term": "Grain addition to sandpile (slow driving)",
          "note": "Both are slow external inputs that build stored potential energy"
        },
        {
          "field_a_term": "Earthquake of magnitude M (seismic energy E)",
          "field_b_term": "Avalanche of size s in BTW sandpile",
          "note": "Rapid redistribution events; size distributions are power laws"
        },
        {
          "field_a_term": "Gutenberg-Richter: N(M) ~ 10^{-bM}, b ≈ 1",
          "field_b_term": "BTW avalanche: P(s) ~ s^{-3/2}",
          "note": "Both are power laws; b-value maps to SOC exponent τ via energy-magnitude relation"
        },
        {
          "field_a_term": "Omori aftershock decay: n(t) ~ t^{-p}, p ≈ 1",
          "field_b_term": "Temporal clustering of SOC avalanches at criticality",
          "note": "Long-range temporal correlations are a universal feature of SOC"
        },
        {
          "field_a_term": "b-value ≈ 1 (universal across tectonic settings)",
          "field_b_term": "SOC exponent at the critical point",
          "note": "The universality of b ≈ 1 reflects the universality class of the critical point"
        },
        {
          "field_a_term": "Spatial correlation length of aftershock zone",
          "field_b_term": "Correlation length diverging at the critical point (ξ → ∞)",
          "note": "Both describe the spatial extent of event clusters"
        },
        {
          "field_a_term": "Seismic quiescence before large earthquakes",
          "field_b_term": "Critical slowing down near the critical point",
          "note": "Predicted by SOC but empirically contested in seismology"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.59.381",
          "note": "Bak, Tang & Wiesenfeld (1987) PRL 59:381 — original SOC sandpile model"
        },
        {
          "doi": "10.1029/JB094iB11p15635",
          "note": "Bak & Tang (1989) J Geophys Res 94:15635 — SOC applied to earthquakes"
        },
        {
          "note": "Gutenberg & Richter (1944) Bull Seism Soc Am 34:185 — empirical G-R law"
        },
        {
          "doi": "10.1007/BF01300524",
          "note": "Sornette & Sammis (1995) J Phys I France — critical point model for earthquake rupture"
        },
        {
          "note": "Omori (1894) J Coll Sci Imp Univ Tokyo 7:111 — aftershock decay law"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/geoscience-physics/b-earthquake-self-organized-criticality.yaml"
    },
    {
      "id": "b-lithospheric-planform-x-rayleigh-benard-wavelength-scaling",
      "title": "Horizontal wavelengths of convection rolls and cellular patterns in Rayleigh-Bénard experiments scale with layer thickness and fluid parameters via Busse–Clever–Kelly stability diagrams — motivating cautious comparison to characteristic lateral scales of plate-boundary networks and mantle flow heterogeneity inferred from seismic tomography, distinct from merely stating “mantle convection exists.”\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Laboratory RB convection selects planforms whose dominant horizontal wavenumber depends on Ra, Prandtl number, and boundary conditions — mantle convection lives at enormous Ra with complex rheology and internal heating, so literal roll-spacing equality fails — yet dimensional scaling literacy helps ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-numerical-mantle-spectral-peaks-track-effective-rb-wavenumber-branches"
      ],
      "communication_gap": "Geodynamics seminars emphasize Nu(Ra) heat-flow scaling while fluid labs showcase spectacular RB planform movies — spectral-planform vocabulary crosses fields less often than integral heat-flow budgets do.\n",
      "translation_table": [
        {
          "field_a_term": "dominant horizontal wavevector k_* (RB stability theory / Busse balloon)",
          "field_b_term": "spectral peaks in mantle velocity spherical harmonic degrees (tomography / models)",
          "note": "Qualitative spectral-selection analogy — Earth-like viscosity contrasts reshape modes relative to lab water experiments."
        },
        {
          "field_a_term": "aspect ratio Γ = diameter / layer thickness in RB tanks",
          "field_b_term": "spherical harmonic degree ℓ mapping to lateral wavelength λ ≈ 2πR/ℓ for global modes",
          "note": "Curvature and compressibility absent in planar RB — scaling comparisons require spherical harmonic filters."
        },
        {
          "field_a_term": "Busse balloon marginal stability curves",
          "field_b_term": "transitions between stagnant lid, episodic subduction, or mobile lid regimes in exoplanet convection surveys",
          "note": "Both classify solution branches though mantle rheology introduces extra bifurcation parameters."
        }
      ],
      "references": [
        {
          "doi": "10.1103/RevModPhys.81.503",
          "note": "Ahlers, Grossmann & Lohse (2009) — RB convection heat transport and pattern formation (Rev. Mod. Phys.)"
        },
        {
          "doi": "10.1017/S0022112070000599",
          "note": "Busse (1978) — lattice problems in thermal convection (J. Fluid Mech.) — planform stability context"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/geoscience-physics/b-lithospheric-planform-x-rayleigh-benard-wavelength-scaling.yaml"
    },
    {
      "id": "b-mantle-convection-rayleigh-benard",
      "title": "Mantle convection driving plate tectonics is a high-Rayleigh-number Rayleigh-Bénard convection system with strongly temperature-dependent viscosity: the Rayleigh number Ra ~ 10⁷–10⁸ predicts chaotic, time-dependent flow that produces the observed pattern of plate speeds, trench depths, and heat flow at mid-ocean ridges.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The mantle is a highly viscous fluid (η ~ 10²¹ Pa·s) heated from below by radiogenic decay and cooling from above. Rayleigh-Bénard (RB) convection occurs when buoyancy (Δρ g d) overcomes viscous resistance: Ra = αΔTρgd³/(κη) ~ 10⁷. At this Ra, RB convection is time-dependent and produces plume-domin",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-plate-tectonics-ra-viscosity-threshold"
      ],
      "communication_gap": "Fluid dynamicists studying RB convection (publishing in J. Fluid Mechanics, Physical Review Fluids) and geophysicists studying mantle convection (publishing in J. Geophysical Research, Earth and Planetary Science Letters) use different dimensionless number scalings and rarely directly cite each other's laboratory experiments.\n",
      "translation_table": [
        {
          "field_a_term": "Rayleigh number Ra of mantle convection",
          "field_b_term": "Rayleigh number Ra in RB convection experiment",
          "note": "Ra ~ 10⁷ for mantle; sets the vigour, planform, and time-dependence of convective flow"
        },
        {
          "field_a_term": "Lithosphere (cold rigid boundary)",
          "field_b_term": "Cold upper plate in RB convection (conducting boundary layer)",
          "note": "Stagnant lid ↔ rigid upper plate; plate tectonics requires lid mobilisation"
        },
        {
          "field_a_term": "Mantle plume (rising hot column from CMB)",
          "field_b_term": "Thermal plume in RB cell (rising hot column from heated base)",
          "note": "Both governed by plume stability criterion and buoyancy flux"
        },
        {
          "field_a_term": "Mid-ocean ridge heat flux",
          "field_b_term": "Heat transport efficiency Nu(Ra) in RB convection",
          "note": "Nu ~ Ra^{0.3} scaling predicts global mantle heat loss from Ra"
        }
      ],
      "references": [
        {
          "doi": "10.1029/JB075i003p00497",
          "note": "Turcotte & Oxburgh (1967) JGR – thermal convection below the lithosphere; RB framework for mantle"
        },
        {
          "doi": "10.1038/s41561-022-00913-8",
          "note": "Tackley (2008) – mantle convection models with plate tectonics: review of RB framework"
        },
        {
          "doi": "10.1126/science.1135456",
          "note": "van Keken et al. – mantle mixing and convection; Ra-heat-flow scaling"
        },
        {
          "doi": "10.1016/j.epsl.2007.10.046",
          "note": "Labrosse & Jaupart (2007) EPSL – thermal evolution of mantle and core from RB scaling laws"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/geoscience-physics/b-mantle-convection-rayleigh-benard.yaml"
    },
    {
      "id": "b-river-braiding-x-soc-like-morphodynamics",
      "title": "Braided rivers exhibit channel splitting and merging producing avalanche-like bedload fluctuations and broad scaling regimes reminiscent of self-organized criticality phenomenology — yet identifying definitive SOC universality classes for real rivers remains speculative and should be labeled as hypothesis-stage analogy pending rigorous scaling collapses on controlled morphodynamic datasets.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "**[Speculation — not established equivalence]** Laboratory braided streams and numerical cellular models show punctuated avulsion events and heavy-tailed distributions of storage increments resembling avalanche statistics in sandpile SOC toy models. Real rivers embed discharge variability, vegetatio",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-river-braiding-x-soc-like-morphodynamics"
      ],
      "communication_gap": "Geomorphologists publish heavy-tail statistics cautiously while statistical physics popularizations sometimes overclaim SOC ubiquity — explicit speculation labeling missing from many cross-disciplinary conference abstracts.\n",
      "translation_table": [
        {
          "field_a_term": "Avulsion / channel migration events (geomorphology)",
          "field_b_term": "Avalanche-size cascades in SOC sandpile models (statistical physics)",
          "note": "Phenomenological similarity only — scaling exponents not universally agreed upon across field sites."
        },
        {
          "field_a_term": "Bedload transport bursts",
          "field_b_term": "Power-law transport bursts hypothesized near critical points",
          "note": "Alternative generating mechanisms include correlated noise without critical phase transition."
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.59.381",
          "note": "Bak, Tang & Wiesenfeld (1987) — SOC sandpile reference baseline for analogy framing"
        },
        {
          "doi": "10.1029/2002JF000262",
          "note": "Murray & Paola (2003) JGR Earth Surface — cellular braided-stream modeling context for scaling debates"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/geoscience-physics/b-river-braiding-x-soc-like-morphodynamics.yaml"
    },
    {
      "id": "b-glacier-dynamics-glens-law",
      "title": "Glacier flow obeys Glen's flow law, a power-law viscosity relation that maps glaciology onto non-Newtonian viscous fluid mechanics, enabling glaciologists to use Stokes flow equations to predict ice sheet dynamics and sea-level contributions.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Ice deformation follows Glen's flow law epsilon_dot = A * tau^n (n ~ 3), making glacier ice a non-Newtonian shear-thinning fluid; this maps ice sheet dynamics onto the Stokes equations for viscous flow with a strain-rate-dependent effective viscosity, allowing fluid dynamical analyses (lubrication a",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-marine-ice-sheet-instability-threshold"
      ],
      "communication_gap": "Glaciologists develop specialized ice-sheet models (ISSM, BISICLES, Elmer/Ice) while the fluid mechanics community develops general viscous flow solvers; there is limited cross-training, leading to glaciologists reinventing numerical methods standard in computational fluid dynamics and fluid mechanicians unaware of glaciological datasets.\n",
      "translation_table": [
        {
          "field_a_term": "Glen's flow law (glaciology)",
          "field_b_term": "power-law viscosity / Ostwald-de Waele model (fluid mechanics)",
          "note": "epsilon_dot = A*tau^n is the glaciological form of the power-law fluid constitutive relation"
        },
        {
          "field_a_term": "effective viscosity of ice (glaciology)",
          "field_b_term": "strain-rate-dependent viscosity eta(epsilon_dot) (rheology)",
          "note": "eta_eff = (A*tau^(n-1))^(-1); shear-thinning behavior enables fast outlet glacier flow"
        },
        {
          "field_a_term": "shallow ice approximation (glaciology)",
          "field_b_term": "lubrication approximation in thin film flow (fluid mechanics)",
          "note": "Both exploit aspect ratio epsilon = H/L << 1 to reduce 3D Stokes to 2D depth-averaged equations"
        },
        {
          "field_a_term": "basal sliding / till deformation (glaciology)",
          "field_b_term": "boundary layer slip condition (fluid mechanics)",
          "note": "Weertman and Coulomb sliding laws are the glaciological analog of partial-slip boundary conditions"
        }
      ],
      "references": [
        {
          "doi": "10.3189/172756455781507085",
          "note": "Glen (1955) - original experimental determination of ice flow law"
        },
        {
          "doi": "10.5194/tc-7-51-2013",
          "note": "Pattyn et al. (2013) - benchmark comparison of ice sheet models using Stokes equations"
        },
        {
          "doi": "10.1029/2005GL025127",
          "note": "Schoof (2007) - ice sheet grounding line dynamics and marine ice sheet instability"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/glaciology-fluid-mechanics/b-glacier-dynamics-glens-law.yaml"
    },
    {
      "id": "b-glacier-isostasy-viscoelastic-rebound",
      "title": "Glacial isostatic adjustment (GIA) connects glaciology and geophysics through viscoelastic rebound: ice sheet loading depresses the Earth's crust elastically and viscously, and postglacial rebound follows viscoelastic relaxation theory with the mantle acting as a Maxwell fluid on timescales of thousands of years.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A Maxwell viscoelastic solid responds to stress with both elastic (Hookean) and viscous (Newtonian) components: ε̇ = σ̇/E + σ/η (E = Young's modulus, η = dynamic viscosity). Under ice loading σ₀, the elastic displacement is immediate: u_e = σ₀/E; the viscous relaxation follows u(t) = u_e·(1 - e^{-t/",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-west-antarctic-ice-sheet-gia-feedback-marine-ice-instability"
      ],
      "communication_gap": "Glaciologists measuring ice mass balance and geophysicists modeling mantle structure work on the same signal (vertical land motion) from opposite directions; GIA modeling requires inputs from both communities (ice history reconstruction + mantle rheology), but the ice history uncertainties dominate GIA predictions in Antarctica and cross-disciplinary model reconciliation is an active challenge.\n",
      "translation_table": [
        {
          "field_a_term": "ice sheet loading / unloading (glaciology)",
          "field_b_term": "surface load on viscoelastic half-space (geophysics)",
          "note": "Ice load is treated as a pressure boundary condition on the elastic-viscous Earth model"
        },
        {
          "field_a_term": "glacial isostatic adjustment GIA (glaciology)",
          "field_b_term": "viscoelastic stress relaxation in mantle (geophysics)",
          "note": "GIA is the geophysical manifestation of mantle viscous relaxation after ice removal"
        },
        {
          "field_a_term": "postglacial rebound rate (glaciology)",
          "field_b_term": "Maxwell relaxation time τ = η/E (geophysics)",
          "note": "Rebound rate depends on mantle viscosity profile; rate constrains mantle rheology"
        },
        {
          "field_a_term": "sea level change from ice loss (glaciology)",
          "field_b_term": "geoid deformation + elastic uplift correction (geophysics)",
          "note": "True sea level change requires GIA correction because land is still rising"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1157376",
          "note": "Milne & Mitrovica (2008) - searching for eustasy in deglacial sea-level histories"
        },
        {
          "doi": "10.1146/annurev.earth.32.082503.144359",
          "note": "Peltier (2004) - global glacial isostasy and the surface of the ice-age Earth"
        },
        {
          "doi": "10.1016/j.quascirev.2011.07.021",
          "note": "Whitehouse et al. (2012) - GIA model for Antarctica"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/glaciology-geophysics/b-glacier-isostasy-viscoelastic-rebound.yaml"
    },
    {
      "id": "b-glacier-calving-fracture-mechanics",
      "title": "Glacier calving — the detachment of icebergs from tidewater glaciers — follows the same fracture mechanics as crack propagation in brittle materials: the calving rate is controlled by a stress intensity factor at the ice-water or ice-air interface that must exceed the mode-I fracture toughness of polycrystalline ice (~0.1 MPa m^0.5)",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Calving of icebergs is governed by linear elastic fracture mechanics (LEFM): a pre-existing crevasse or basal water crack propagates when the stress intensity factor K_I = sigma * sqrt(pi * a) (where sigma is net tensile stress and a is crack half-length) reaches the fracture toughness K_Ic of ice (",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-glacier-calving-fracture-toughness-prediction"
      ],
      "communication_gap": "Glaciologists model calving using empirical calving laws or force-balance approaches while materials scientists apply LEFM; the transfer of LEFM tools to ice was advanced by Weertman (1973) but wide adoption in operational calving models has lagged due to the difficulty of characterizing ice heterogeneity at glacier scale.",
      "translation_table": [
        {
          "field_a_term": "crevasse depth and surface meltwater ponding (glaciology)",
          "field_b_term": "crack length and internal pressure in hydraulic fracture (materials science)",
          "note": "Water-filled crevasses propagate via the same hydrofracture mechanism as wellbore fracking"
        },
        {
          "field_a_term": "ice fracture toughness K_Ic (glaciology)",
          "field_b_term": "mode-I critical stress intensity factor (materials science)",
          "note": "Measured at ~0.1 MPa m^0.5 for polycrystalline glacier ice; sets the calving threshold"
        },
        {
          "field_a_term": "backstress from ice shelf buttressing (glaciology)",
          "field_b_term": "compressive residual stress inhibiting crack growth (materials science)",
          "note": "Ice shelf back-pressure acts like residual compression that prevents crack opening"
        },
        {
          "field_a_term": "calving front retreat rate (glaciology)",
          "field_b_term": "crack propagation velocity / fatigue crack growth rate (materials science)",
          "note": "Both follow Paris-law-type power-law relationships between stress intensity range and advance rate"
        }
      ],
      "references": [
        {
          "doi": "10.1017/S0022143000022577",
          "note": "Weertman (1973) - can a water-filled crevasse reach the bottom surface of a glacier? Foundational LEFM application to ice"
        },
        {
          "doi": "10.1029/2006JF000664",
          "note": "Bassis & Walker (2012) - upper and lower limits on the stability of calving glaciers from the yield strength envelope of ice"
        },
        {
          "doi": "10.1126/science.1208478",
          "note": "Joughin et al. (2012) - ice-sheet response to oceanic forcing: calving and LEFM"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/glaciology-materials-science/b-glacier-calving-fracture-mechanics.yaml"
    },
    {
      "id": "b-neural-operator-surrogates-x-groundwater-inverse-modeling",
      "title": "Neural operator surrogates connect operator learning advances to groundwater inverse modeling at basin scale.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Fourier neural operators can approximate families of PDE solution maps for groundwater flow, enabling amortized inverse-model exploration with uncertainty-aware screening before full-physics runs.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-fourier-neural-operator-surrogates-accelerate-groundwater-inversion-with-calibrated-uncertainty"
      ],
      "communication_gap": "Communities use different terminology and validation conventions, masking transferable method equivalence.",
      "translation_table": [],
      "references": [
        {
          "arxiv": "2010.08895",
          "note": "Fourier neural operator introduction."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/hydrology-computer-science/b-neural-operator-surrogates-x-groundwater-inverse-modeling.yaml"
    },
    {
      "id": "b-river-network-hacks-law-fractal",
      "title": "River network geometry obeys Hack's law (L ~ A^{0.6}) and Horton's laws of stream numbers and lengths because river networks are statistically self-similar (fractal) structures grown by optimal channel network (OCN) theory - an energy-minimisation principle that mathematics predicts and hydrology observes across six orders of magnitude in drainage area.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Hack's law states that main stream length L ~ A^h where h ~ 0.57-0.60, exceeding the isometric expectation (h = 0.5 for geometrically similar basins): mainstream length grows faster than the square root of basin area because basins elongate and streams meander. Horton's laws state that stream number N_k = R_b^k (bifurcation ratio R_b ~ 3-5) and stream lengths L_k ~ R_L^k (length ratio R_L ~ 1.5-",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Hydrologists use Horton's laws and Hack's law as empirical rules without always connecting them to fractal geometry and energy optimisation theory; fractal mathematicians rarely work with digital elevation models and hydrological field data. The quantitative derivation of Hack's law from OCN theory is not widely known outside the geomorphology community.\n",
      "translation_table": [
        {
          "field_a_term": "Hausdorff fractal dimension D ~ 1.8 (mathematics)",
          "field_b_term": "nearly space-filling (D < 2) river network geometry (hydrology)",
          "note": "D measured by box-counting on river network maps; D = 2 would be fully space-filling"
        },
        {
          "field_a_term": "energy minimisation principle (mathematics)",
          "field_b_term": "fluvial erosion selects channels that minimise energy expenditure (hydrology)",
          "note": "OCN theory: rivers evolve toward minimum total energy dissipation configuration"
        },
        {
          "field_a_term": "Hack's law exponent h ~ 0.57 (mathematics)",
          "field_b_term": "relationship between mainstream length and basin area (hydrology)",
          "note": "h = 1/D_fractal * some geometric factor; derived analytically from OCN theory"
        },
        {
          "field_a_term": "Horton bifurcation ratio R_b (mathematics)",
          "field_b_term": "average number of tributaries per stream at each order (hydrology)",
          "note": "R_b ~ 3-5 universally; reflects fractal branching structure of drainage networks"
        }
      ],
      "references": [
        {
          "doi": "10.1029/WR010i005p00969",
          "note": "Hack (1957) - studies of longitudinal stream profiles in Virginia and Maryland"
        },
        {
          "doi": "10.1017/CBO9780511512865",
          "note": "Rodriguez-Iturbe & Rinaldo (1997) - Fractal River Basins; OCN theory"
        },
        {
          "doi": "10.1038/s41586-020-2098-5",
          "note": "Seybold et al. (2020) - branching geometry of river networks"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/hydrology-mathematics/b-river-network-hacks-law-fractal.yaml"
    },
    {
      "id": "b-immune-regulation-feedback",
      "title": "The immune system is a proportional-integral (PI) feedback controller — T-regulatory cells implement integral negative feedback on effector T-cell responses, maintaining self-tolerance exactly as a PI controller eliminates steady-state error.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Classical feedback control theory provides a precise mathematical framework for immune regulation. The IL-2 / T-regulatory cell (Treg) circuit implements a proportional- integral (PI) control loop maintaining self-tolerance against autoimmunity.\nIn control engineering terms: effector T-cells (T_eff)",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-autoimmune-pi-gain-deficiency"
      ],
      "communication_gap": "Immunologists describe regulatory circuits in terms of cell types and cytokines; control engineers describe them in transfer functions and Bode plots. The conceptual translation is clear but the mathematical formalism rarely crosses disciplinary boundaries. Systems biology occupies the middle ground but rarely uses formal control-theoretic tools (stability margins, Nyquist criterion, robust control). Clinical immunology training does not include feedback control theory.\n",
      "translation_table": [
        {
          "field_a_term": "effector T-cell count T_eff",
          "field_b_term": "plant output y(t) to be regulated"
        },
        {
          "field_a_term": "T-regulatory cell count T_reg",
          "field_b_term": "controller state (integrator variable)"
        },
        {
          "field_a_term": "IL-2 cytokine concentration",
          "field_b_term": "error signal e(t) = reference - output"
        },
        {
          "field_a_term": "antigen load",
          "field_b_term": "reference input r(t) / setpoint"
        },
        {
          "field_a_term": "Treg suppression of T_eff (rate k·T_reg·T_eff)",
          "field_b_term": "proportional control action u_P(t) = K_p·e(t)"
        },
        {
          "field_a_term": "Treg accumulation by IL-2 (integral of cytokine signal)",
          "field_b_term": "integral control action u_I(t) = K_i∫e(τ)dτ"
        },
        {
          "field_a_term": "autoimmune disease (insufficient Treg suppression)",
          "field_b_term": "steady-state error (controller gain too low)"
        },
        {
          "field_a_term": "cytokine storm (IL-6/TNF positive feedback loop)",
          "field_b_term": "unstable closed-loop system (positive feedback dominates)"
        },
        {
          "field_a_term": "immunosuppressive therapy (corticosteroids)",
          "field_b_term": "adding external negative feedback to stabilize the loop"
        }
      ],
      "references": [
        {
          "doi": "10.1371/journal.pcbi.1003122",
          "note": "Khailaie et al. (2013) PLOS Comput Biol — PI controller model of IL-2/Treg dynamics"
        },
        {
          "doi": "10.1186/1752-0509-4-2",
          "note": "Busse et al. (2010) BMC Syst Biol 4:2 — mathematical model of Treg-mediated tolerance"
        },
        {
          "note": "Doyle, Francis & Tannenbaum (2009) Feedback Control Theory. Dover Publications."
        },
        {
          "doi": "10.1038/msb.2010.32",
          "note": "Feinerman et al. (2010) Mol Syst Biol — IL-2 signaling quantification"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/immunology-control-theory/b-immune-regulation-feedback.yaml"
    },
    {
      "id": "b-foundation-model-x-tcr-antigen-specificity-transfer",
      "title": "Sequence foundation-model pretraining bridges protein language transfer and T-cell receptor antigen-specificity inference.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy (to be empirically validated): Large-scale protein sequence pretraining may transfer contextual representations to TCR-antigen binding tasks similarly to repertoire-level priors used in immunological specificity assays.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-tcr-foundation-pretraining-improves-antigen-specificity-recall"
      ],
      "communication_gap": "Domain operators prioritize interpretable reliability diagnostics, while ML work often prioritizes aggregate accuracy without deployment-grade uncertainty audits.",
      "translation_table": [
        {
          "field_a_term": "model prior",
          "field_b_term": "domain prior",
          "note": "Both constrain inference in data-sparse regimes."
        },
        {
          "field_a_term": "uncertainty estimate",
          "field_b_term": "risk-aware decision support",
          "note": "Uncertainty quality determines practical utility."
        },
        {
          "field_a_term": "out-of-distribution behavior",
          "field_b_term": "deployment robustness",
          "note": "Shift sensitivity governs real-world reliability."
        }
      ],
      "references": [
        {
          "arxiv": "2006.10555",
          "note": "Biological sequence language modeling."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/immunology-machine-learning/b-foundation-model-x-tcr-antigen-specificity-transfer.yaml"
    },
    {
      "id": "b-lipid-nanoparticle-mrna-delivery",
      "title": "mRNA therapeutics require lipid nanoparticle delivery vehicles whose self-assembly is governed by hydrophobic balance and ionizable lipid pKa — a materials science problem with immunological constraints.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "mRNA therapeutics (breakthrough gap bg-mrna-programmable-medicine) require delivery vehicles that protect fragile single-stranded mRNA from serum nucleases and enable endosomal escape into the cytoplasm. Lipid nanoparticles (LNPs) — self-assembled from ionizable lipids, PEG-lipids, cholesterol, and ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ionizable-lipid-pka-endosomal-escape"
      ],
      "communication_gap": "LNP development sits at the intersection of lipid biophysics, formulation chemistry, immunology, and pharmacology. Academic labs typically specialize in one aspect; industrial development integrates all four but rarely publishes mechanistic detail. The link between specific LNP materials properties (pKa, HLB) and immunological outcomes (innate vs adaptive response) is incompletely understood.\n",
      "translation_table": [
        {
          "field_a_term": "hydrophilic-lipophilic balance (HLB)",
          "field_b_term": "LNP self-assembly morphology (micelle vs lamellar vs hexagonal)",
          "note": "HLB determines particle internal structure and endosomal escape efficiency"
        },
        {
          "field_a_term": "ionizable lipid pKa",
          "field_b_term": "endosomal escape efficiency and circulation half-life",
          "note": "pKa 6.2-6.8 is optimal — neutral at pH 7.4, cationic in endosome pH 5.5"
        },
        {
          "field_a_term": "PEG-lipid density on particle surface",
          "field_b_term": "immunological stealth (reduced macrophage uptake, anti-PEG antibody risk)",
          "note": "High PEG density = long circulation but risk of PEG-specific immune response"
        },
        {
          "field_a_term": "particle size (50-100 nm)",
          "field_b_term": "tissue biodistribution (liver vs muscle vs lymph node targeting)",
          "note": "Size and lipid composition jointly determine organ tropism"
        },
        {
          "field_a_term": "cholesterol content (40-50 mol%)",
          "field_b_term": "membrane rigidity and mRNA encapsulation efficiency",
          "note": "Cholesterol provides structural stability and modulates fusion with endosomal membrane"
        }
      ],
      "references": [
        {
          "note": "Semple et al. (2010) Rational design of cationic lipids for siRNA delivery. Nat Biotechnol 28:172-176"
        },
        {
          "note": "Kulkarni et al. (2019) Lipid nanoparticles enabling gene therapies: from concepts to clinical utility. Nanoscale 11:21733-21739"
        },
        {
          "note": "Akinc et al. (2019) The Onpattro story and the clinical translation of nanomedicines containing nucleic acid-based drugs. Nat Nanotechnol 14:1084-1087"
        },
        {
          "note": "Pardi et al. (2018) mRNA vaccines — a new era in vaccinology. Nat Rev Drug Discov 17:261-279"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/immunology-materials-science/b-lipid-nanoparticle-mrna-delivery.yaml"
    },
    {
      "id": "b-borrelia-immune-evasion",
      "title": "Borrelia burgdorferi's VlsE antigenic variation and complement evasion — studied separately in microbiology and immunology — together constitute a unified immune-escape architecture with direct therapeutic implications.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Borrelia burgdorferi evades host immunity through two mechanistically distinct but synergistic strategies that span the microbiology–immunology boundary. (1) Antigenic variation (VlsE): Borrelia encodes 15+ silent vls cassettes on a linear plasmid (lp36). During mammalian infection, segmental gene c",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-borrelia-triple-combo-persister-eradication"
      ],
      "communication_gap": "Lyme disease microbiology researchers (Borrelia molecular biology, plasmid genetics, gene conversion) publish primarily in Infection and Immunity, Journal of Bacteriology, and mBio. Clinical immunologists studying PTLDS and seronegative Lyme publish in Clinical Infectious Diseases, Arthritis & Rheumatology, and Frontiers in Medicine. The molecular biology of VlsE is rarely discussed in clinical immunology papers, and immunological consequences of antigenic variation are rarely quantified in microbiology papers. Structural biologists who could design CRASP-blocking drugs are largely absent from Lyme disease conferences.\n",
      "translation_table": [
        {
          "field_a_term": "VlsE gene conversion rate (microbiology)",
          "field_b_term": "rate of antibody escape (immunology)",
          "note": "Each gene conversion event produces a new antigenic variant; the rate sets how quickly humoral immunity is defeated"
        },
        {
          "field_a_term": "vls silent cassette diversity on lp36 (microbiology)",
          "field_b_term": "breadth of antibody evasion space (immunology)",
          "note": "Greater cassette diversity means more variants can be generated, overwhelming the adaptive immune repertoire"
        },
        {
          "field_a_term": "CRASP-Factor H binding affinity (microbiology/structural biology)",
          "field_b_term": "complement evasion efficiency (immunology)",
          "note": "Higher CRASP-FH binding Kd directly predicts reduced C3b deposition and serum resistance"
        },
        {
          "field_a_term": "OspC downregulation timing (microbiology)",
          "field_b_term": "window of antibody vulnerability (immunology)",
          "note": "OspC is expressed only briefly during tick feeding — antibody targeting requires vaccines that act within this narrow window"
        }
      ],
      "references": [
        {
          "doi": "10.1128/IAI.69.10.6678-6686.2001",
          "note": "Labandeira-Rey & Skare (2001) Decreased infectivity in Borrelia burgdorferi strain B31 is associated with loss of linear plasmid 25 or 28-1. Infect Immun 69:6678."
        },
        {
          "doi": "10.1016/S0092-8674(00)80448-6",
          "note": "Zhang et al. (1997) Antigenic variation in Lyme disease borreliae by promiscuous recombination of VMP-like sequence cassettes. Cell 89:275–285."
        },
        {
          "doi": "10.1016/j.micinf.2005.12.027",
          "note": "Kraiczy & Würzner (2006) Complement escape of human pathogenic bacteria by acquisition of complement regulators. Microbes Infect 8:1298–1307."
        },
        {
          "doi": "10.1038/nrdp.2016.90",
          "note": "Steere et al. (2016) Lyme disease. Nat Rev Dis Primers 2:16090."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/immunology-microbiology/b-borrelia-immune-evasion.yaml"
    },
    {
      "id": "b-immune-network-idiotypic",
      "title": "Jerne's immune network theory (1974) — antibodies recognising other antibodies (idiotypes) form a self-regulating scale-free network whose attractor dynamics implement immune memory and self-tolerance — is formally equivalent to a Hopfield associative memory network; immunological disorders correspond to network bifurcations.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Jerne (1974) proposed that the immune system is a network: antibodies (idiotypes) can be recognised by other antibodies (anti-idiotypes) as if they were foreign antigens. This creates a network of mutual recognition where antibody concentrations influence each other — an immune network.\nMATHEMATICAL",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-autoimmune-disease-idiotypic-attractor-bifurcation"
      ],
      "communication_gap": "Jerne's network theory was highly influential in theoretical immunology in the 1980s-90s but fell out of favour as molecular immunology focused on antigen-specific mechanisms. The mathematical framework (attractor dynamics, network topology) was developed primarily by mathematical biologists (Perelson, Varela, Stewart) rather than experimental immunologists. Modern B-cell repertoire sequencing now makes it feasible to reconstruct idiotypic networks empirically — but the theoretical framework developed 30 years ago is rarely cited in current immunoinformatics papers.\n",
      "translation_table": [
        {
          "field_a_term": "idiotype network node xᵢ",
          "field_b_term": "antibody clone concentration",
          "note": "Each node in the idiotypic network is a clonotype defined by its variable region sequence"
        },
        {
          "field_a_term": "network edge Jᵢⱼ",
          "field_b_term": "paratope-idiotope recognition strength",
          "note": "Structural complementarity between antibody paratope and idiotope determines coupling strength"
        },
        {
          "field_a_term": "network attractor (energy minimum)",
          "field_b_term": "immune memory state / homeostatic set point",
          "note": "The healthy immune system occupies a stable attractor; perturbations (infection, vaccination) shift it"
        },
        {
          "field_a_term": "bifurcation / attractor transition",
          "field_b_term": "autoimmune disease onset / immune dysregulation",
          "note": "Autoimmunity may correspond to bifurcation to a pathological attractor where self-reactive clones are stable"
        },
        {
          "field_a_term": "scale-free hub nodes",
          "field_b_term": "natural antibodies (broadly reactive IgM antibodies)",
          "note": "Highly connected idiotype hubs modulate the entire network, analogous to hub nodes in social networks"
        }
      ],
      "references": [
        {
          "doi": "10.1016/S0020-7519(74)80093-X",
          "note": "Jerne, N.K. (1974). Towards a network theory of the immune system. Ann. Immunol. (Inst. Pasteur) 125C:373–389."
        },
        {
          "doi": "10.1016/0165-2478(89)90133-5",
          "note": "Perelson, A.S. (1989). Immune network theory. Immunol. Rev. 110:5–36."
        },
        {
          "doi": "10.1016/0167-5699(91)90106-A",
          "note": "Varela, F.J. & Coutinho, A. (1991). Second generation immune networks. Immunol. Today 12:159–166."
        },
        {
          "note": "Stewart, J. & Varela, F.J. (1991). Morphogenesis in shape-space. J. Theor. Biol. 153:477–498. -- Attractor dynamics in idiotypic networks"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/immunology-network-science/b-immune-network-idiotypic.yaml"
    },
    {
      "id": "b-immune-recognition-statistical-pattern-detection",
      "title": "The adaptive immune system solves a high-dimensional pattern detection problem using stochastic V(D)J recombination to generate a diverse receptor repertoire, thymic selection to set affinity thresholds, and clonal expansion as a Bayesian posterior update — mathematically equivalent to a noisy channel decoder for self/non-self discrimination.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The adaptive immune system must recognize ~10¹⁵ possible foreign antigens using only ~10⁷ circulating T-cell clones (each with a distinct T-cell receptor, TCR). This is a covering problem: the T-cell repertoire must collectively \"cover\" pathogen space while avoiding self-peptides. The physics and ma",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-tcr-repertoire-percolation-threshold-pathogen-coverage"
      ],
      "communication_gap": "Theoretical immunology (Perelson, Nowak) uses statistical physics and information theory, but clinical immunologists rarely read physics journals. The maximum entropy / statistical mechanics framework for immune diversity (Mora & Bialek) is published in J Stat Phys but rarely cited in immunology literature. Conversely, the rich experimental data on thymic selection and clonal dynamics in clinical immunology papers is underused by the theoretical physics community. NIH study sections fund immunology and physics separately.\n",
      "translation_table": [
        {
          "field_a_term": "V(D)J recombination diversity (~10¹⁸ possible TCR sequences)",
          "field_b_term": "random code ensemble in Shannon coding theory",
          "note": "Repertoire diversity is the code rate; thymic selection is the decoder"
        },
        {
          "field_a_term": "thymic selection affinity window (1 nM < K_D < 10 μM)",
          "field_b_term": "likelihood ratio threshold in Neyman-Pearson detection",
          "note": "Selection implements a decision threshold on TCR-pMHC binding affinity"
        },
        {
          "field_a_term": "T-cell repertoire covering pathogen shape space",
          "field_b_term": "percolation threshold in random covering theory",
          "note": "Minimum N_T to cover pathogen space is a geometric percolation problem"
        },
        {
          "field_a_term": "clonal expansion upon antigen recognition",
          "field_b_term": "Bayesian posterior update (likelihood × prior)",
          "note": "Clonal expansion ∝ P(pathogen | TCR binding) — amplifies matching clones"
        },
        {
          "field_a_term": "immunological memory (long-lived plasma cells)",
          "field_b_term": "sufficient statistic of past Bayesian updates",
          "note": "Memory cells encode the posterior from past infections — compressed experience"
        },
        {
          "field_a_term": "self/non-self discrimination",
          "field_b_term": "binary hypothesis testing in a noisy channel (Shannon)",
          "note": "Thymic deletion = error correction for false positives (autoimmunity risk)"
        }
      ],
      "references": [
        {
          "note": "Burnet (1959) The Clonal Selection Theory of Acquired Immunity. Cambridge University Press."
        },
        {
          "doi": "10.1016/S0092-8674(02)00761-7",
          "note": "Germain (2002) Cell 109:291 — T-cell development and selection review"
        },
        {
          "note": "Perelson & Oster (1979) J Theor Biol 81:645 — shape space and repertoire covering theory"
        },
        {
          "doi": "10.1007/s10955-011-0295-3",
          "note": "Mora & Bialek (2011) J Stat Phys 144:268 — maximum entropy model of TCR repertoire"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/immunology-physics/b-immune-recognition-statistical-pattern-detection.yaml"
    },
    {
      "id": "b-masked-autoencoding-x-cryo-em-denoising-priors",
      "title": "Masked autoencoding bridges self-supervised reconstruction and cryo-EM denoising priors for pathogen structural biology.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy (to be empirically validated): masked-autoencoder pretraining on molecular imagery can learn reconstruction priors that improve low-SNR cryo-EM downstream tasks without requiring exhaustive labels.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-masked-autoencoder-pretraining-improves-cryo-em-low-snr-reconstruction"
      ],
      "communication_gap": "Self-supervised vision papers rarely report structural biology fidelity metrics needed for molecular interpretation.",
      "translation_table": [
        {
          "field_a_term": "mask-and-reconstruct objective",
          "field_b_term": "missing-information recovery in noisy cryo-EM views",
          "note": "Self-supervision trains contextual structural priors."
        },
        {
          "field_a_term": "encoder latent representation",
          "field_b_term": "compact structural motif encoding",
          "note": "Latents summarize recurring molecular geometries."
        },
        {
          "field_a_term": "pretrain-finetune transfer",
          "field_b_term": "low-label pathogen structure adaptation",
          "note": "Pretraining reduces dependence on labeled particle datasets."
        }
      ],
      "references": [
        {
          "arxiv": "2111.06377",
          "note": "Masked Autoencoders Are Scalable Vision Learners."
        },
        {
          "url": "https://www.ebi.ac.uk/emdb/",
          "note": "EMDB repository context for cryo-EM structures."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/infectious-disease-machine-learning/b-masked-autoencoding-x-cryo-em-denoising-priors.yaml"
    },
    {
      "id": "b-error-threshold-information",
      "title": "Eigen's quasispecies error threshold in molecular evolution and Shannon's channel capacity theorem in information theory are the same mathematical result — the mutation rate at which genetic information is irreversibly lost is the Shannon capacity of the replication channel.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Manfred Eigen's quasispecies theory (1971) shows that a replicating population of sequences (RNA, DNA, or proteins) undergoes a phase transition at a critical mutation rate mu_c: below mu_c, a \"master sequence\" (the fittest genotype) is maintained by selection; above mu_c, the population \"melts\" int",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-viral-proofreading-shannon-optimality"
      ],
      "communication_gap": "Eigen himself noted the Shannon analogy in his Nobel lecture (1992), and several papers have formalised the equivalence. Despite this, virology journals do not use information-theoretic notation, and coding theorists are unaware that RNA viruses have been running near-capacity replication for billions of years — possibly solving problems (near-threshold operation, genome size optimisation) that information theory only recently addressed with polar codes and finite-length analysis. The language barrier is severe: \"quasispecies\", \"error catastrophe\", and \"fitness landscape\" map exactly to \"codeword\", \"channel erasure\", and \"code distance\" — but the terminologies have never been bridged in a form accessible to practitioners in both fields.\n",
      "translation_table": [
        {
          "field_a_term": "Shannon channel capacity C",
          "field_b_term": "Eigen error threshold mu_c (maximum tolerable mutation rate before information collapse)"
        },
        {
          "field_a_term": "binary symmetric channel noise probability p",
          "field_b_term": "per-base mutation rate mu per replication cycle"
        },
        {
          "field_a_term": "information rate R (bits per channel use)",
          "field_b_term": "sequence-specific information content of the master genotype (Eigen value)"
        },
        {
          "field_a_term": "error-correcting code (redundancy added to protect signal)",
          "field_b_term": "DNA repair, proofreading, error correction (redundancy evolved to protect genome)"
        },
        {
          "field_a_term": "Shannon entropy H(p) = -p*log(p) - (1-p)*log(1-p)",
          "field_b_term": "sequence entropy of the quasispecies distribution above error threshold"
        },
        {
          "field_a_term": "capacity-achieving code (polar code, LDPC)",
          "field_b_term": "error-threshold-optimising genome (approaches mu_c from below to maximise evolvability)"
        },
        {
          "field_a_term": "block length N (code length)",
          "field_b_term": "genome length L (number of base pairs)"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF01402390",
          "note": "Eigen (1971) — original quasispecies theory and error threshold"
        },
        {
          "doi": "10.1007/978-3-642-67247-7",
          "note": "Eigen & Schuster (1979) — The Hypercycle; explicit Shannon analogy section"
        },
        {
          "doi": "10.1006/jtbi.1997.0536",
          "note": "Domingo & Holland (1997) — formal equivalence of error threshold and channel capacity"
        },
        {
          "doi": "10.1103/PhysRevLett.86.5819",
          "note": "Wilke et al. (2001) — quasispecies on fitness landscapes; phase transition confirmed"
        },
        {
          "doi": "10.1038/nature09149",
          "note": "Denison et al. (2011) — coronavirus nsp14 proofreading exonuclease; information-theoretic function"
        },
        {
          "arxiv": "0901.2137",
          "note": "Arikan (2009) — polar codes achieve Shannon capacity; the engineering analog of biological proofreading"
        }
      ],
      "last_reviewed": "2026-05-04",
      "file": "cross-domain/information-evolution/b-error-threshold-information.yaml"
    },
    {
      "id": "b-knowledge-overload-findability",
      "title": "Scientific knowledge overload is a channel-capacity problem: the rate of cross-domain insight generation is limited not by the volume of published results but by the bandwidth of the translation layer between domain vocabularies — structured cross-domain bridges function as a lossless codec reducing mutual information distance without destroying signal.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Shannon's channel capacity theorem (C = B log₂(1 + S/N)) provides a formal framework for the scientific knowledge overload problem. Consider each scientific domain as a transmitter and each researcher as a receiver. The effective \"channel\" between domains is limited by: (1) the noise introduced by d",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-bridge-catalog-reduces-rediscovery-lag"
      ],
      "communication_gap": "Information theory (Shannon, Weaver), epistemology (Floridi), network science (Barabási), cognitive psychology (Sweller), and recommender systems (Maes, Resnick, Varian) have each independently described aspects of the knowledge overload problem from their own disciplinary perspectives. These communities publish in non-overlapping venues (Bell System Technical Journal, Minds & Machines, Science, Cognitive Science, Communications of the ACM) and have not converged on a unified framework. The information-theoretic treatment of scientific knowledge overload as a channel-capacity problem — with domain jargon as noise and bridge catalogs as codecs — has not appeared in the literature as an explicit cross-domain synthesis. Library science treats this as a retrieval problem; science-of-science treats it as a network problem; cognitive science treats it as a working-memory problem. None has unified these threads using Shannon's channel capacity framework.\n",
      "translation_table": [
        {
          "field_a_term": "Channel capacity C = B log₂(1 + S/N) (Shannon 1948)",
          "field_b_term": "Cross-domain discovery rate; maximum rate at which cross-domain insights can be generated",
          "note": "B = researcher cognitive bandwidth; S/N = signal-to-jargon ratio across domain boundary"
        },
        {
          "field_a_term": "Noise (channel distortion)",
          "field_b_term": "Domain-specific jargon barrier; untranslated vocabulary",
          "note": "Semantically equivalent concepts encoded in mutually unintelligible notation across fields"
        },
        {
          "field_a_term": "Codec / lossless compression",
          "field_b_term": "Bridge translation table; structured cross-domain analogy",
          "note": "Reduces mutual information distance without signal loss; enables cross-domain decoding"
        },
        {
          "field_a_term": "PageRank hub node (Barabási & Albert 1999)",
          "field_b_term": "High-connectivity mathematical structure (entropy, phase transition, percolation)",
          "note": "Hub nodes in the scientific knowledge graph appear across the most domains — foundational concepts"
        },
        {
          "field_a_term": "Recommender system / personalization agent (Maes 1994)",
          "field_b_term": "AI bridge co-pilot; relevance filtering across domain boundaries",
          "note": "When copies are free, findability is scarce — recommendation solves the matching problem"
        },
        {
          "field_a_term": "Shannon entropy H = -Σ p log₂ p",
          "field_b_term": "Epistemic gap; uncertainty in domain vocabulary mapping",
          "note": "H quantifies the information deficit between domain representations before a bridge is constructed"
        },
        {
          "field_a_term": "Cognitive load (Sweller 1988)",
          "field_b_term": "Working memory bandwidth B; finite processing capacity limits cross-domain integration",
          "note": "Extraneous cognitive load from jargon translation consumes bandwidth that should carry scientific signal"
        },
        {
          "field_a_term": "Mutual information I(A; B)",
          "field_b_term": "Degree of cross-domain structural overlap; shared mathematical structure",
          "note": "I(A;B) = 0 implies domains appear orthogonal; bridge construction reveals hidden I(A;B) > 0"
        },
        {
          "field_a_term": "Information overload (rate > channel capacity)",
          "field_b_term": "Scientific knowledge overload; publication rate exceeds cross-domain integration bandwidth",
          "note": "~4% annual growth in scientific literature doubling every 17 years; integration rate is sub-linear"
        },
        {
          "field_a_term": "Giant component threshold (percolation theory)",
          "field_b_term": "Minimum bridge catalog density for any domain to reach any other in 2–3 steps",
          "note": "Network percolation determines when the knowledge graph becomes navigable at scale"
        }
      ],
      "references": [
        {
          "note": "Shannon (1948) A Mathematical Theory of Communication. Bell Syst Tech J 27:379-423"
        },
        {
          "doi": "10.1126/science.286.5439.509",
          "note": "Barabási & Albert (1999) Emergence of scaling in random networks. Science 286:509-512"
        },
        {
          "note": "Sweller (1988) Cognitive load during problem solving: effects on learning. Cogn Sci 12:257-285"
        },
        {
          "note": "Floridi (2010) Information: A Very Short Introduction. Oxford University Press"
        },
        {
          "note": "Maes P (1994) Agents that reduce work and information overload. Commun ACM 37:31-40"
        },
        {
          "note": "Cover TM & Thomas JA (2006) Elements of Information Theory, 2nd ed. Wiley"
        },
        {
          "note": "Resnick P & Varian HR (1997) Recommender systems. Commun ACM 40:56-58"
        },
        {
          "note": "Merton RK (1961) Singletons and multiples in scientific discovery. Proc Am Philos Soc 105:470-486"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/information-science-epistemology/b-knowledge-overload-findability.yaml"
    },
    {
      "id": "b-belief-propagation-x-genotype-phasing-linkage-graphs",
      "title": "Belief propagation on factor graphs bridges probabilistic inference in computer science with haplotype phasing and genotype imputation pipelines in statistical genetics.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Established engineering practice uses sum-product / approximate message passing algorithms on graphical models for large-scale genotype phasing and related inference tasks; residual speculative analogy only at model mismatch boundaries—loopy graphs and rare-variant regimes break standard BP converge",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-damped-bp-calibration-improves-phasing-accuracy"
      ],
      "communication_gap": "Genetics software emphasizes throughput on biobank scales while ML theory papers emphasize convergence certificates rarely mapped to cohort-specific linkage structures.",
      "translation_table": [
        {
          "field_a_term": "factor graph messages",
          "field_b_term": "linkage-block updates / haplotype posteriors",
          "note": "Implementation-specific schedules affect convergence."
        },
        {
          "field_a_term": "damping parameter",
          "field_b_term": "stabilized updates in loopy linkage graphs",
          "note": "Heuristic ties across communities differ in naming."
        },
        {
          "field_a_term": "marginal probabilities",
          "field_b_term": "phased haplotype posteriors",
          "note": "Calibration audits differ by cohort ascertainment."
        }
      ],
      "references": [
        {
          "url": "https://www.cs.toronto.edu/~radford/belief.pdf",
          "note": "Classic exposition of belief propagation on graphical models (Pearl lineage)."
        }
      ],
      "last_reviewed": "2026-05-09",
      "file": "cross-domain/information-theory-genetics/b-belief-propagation-x-genotype-phasing-linkage-graphs.yaml"
    },
    {
      "id": "b-shannon-entropy-genetic-information",
      "title": "DNA is a digital information storage medium whose structure, redundancy, and mutation dynamics are quantitatively captured by Shannon's information theory — the genetic code is a natural error-correcting code whose properties minimize the cost of single-nucleotide substitutions.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Shannon's (1948) framework maps onto molecular genetics with striking precision. The DNA alphabet has size q = 4 (A, T, G, C), so the maximum entropy per position is log₂(4) = 2 bits. The information content of a genome of length L is:\n\n  I_genome = Σᵢ H(position i) bits ≤ 2L bits\n\nThe genetic code ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-genetic-code-error-correcting-design"
      ],
      "communication_gap": "Information theorists rarely study biology; molecular biologists rarely apply Shannon's framework quantitatively. The formal equivalence between DNA replication and a noisy channel is acknowledged in bioinformatics but rarely taught as a founding principle. The error-correcting interpretation of the genetic code (Freeland & Hurst 1998) is a landmark result that remains outside standard genetics curricula.\n",
      "translation_table": [
        {
          "field_a_term": "alphabet size q",
          "field_b_term": "nucleotide bases (A, T, G, C)",
          "note": "q = 4 for DNA/RNA; q = 20 for amino acid sequences"
        },
        {
          "field_a_term": "Shannon entropy H(X)",
          "field_b_term": "sequence variability per position",
          "note": "High-entropy positions are variable (evolutionarily unconstrained); low-entropy positions are conserved"
        },
        {
          "field_a_term": "channel noise / bit error rate",
          "field_b_term": "mutation rate per base per generation",
          "note": "Both quantify the probability of a symbol being changed during transmission/replication"
        },
        {
          "field_a_term": "redundancy / error-correcting code",
          "field_b_term": "genetic code degeneracy (synonymous codons)",
          "note": "Synonymous codons provide natural redundancy that buffers the phenotype against single-base errors"
        },
        {
          "field_a_term": "channel capacity C",
          "field_b_term": "maximum heritable information per genome",
          "note": "Shannon's theorem bounds how much information can survive noisy replication and selection"
        }
      ],
      "references": [
        {
          "note": "Shannon, C.E. (1948). A Mathematical Theory of Communication. Bell Syst. Tech. J. 27:379–423. -- Original information theory paper"
        },
        {
          "note": "Gatlin, L.L. (1972). Information Theory and the Living System. Columbia University Press. -- First systematic application of Shannon entropy to biological sequences"
        },
        {
          "doi": "10.1007/s002390010011",
          "note": "Freeland, S.J. & Hurst, L.D. (1998). The genetic code is one in a million. J. Mol. Evol. 47:238–248."
        },
        {
          "doi": "10.1038/nrg1419",
          "note": "Adami, C. (2004). Information theory in molecular biology. Nat. Rev. Genet. 5:880–888."
        },
        {
          "url": "https://doi.org/10.1093/nar/18.20.6097",
          "note": "Schneider & Stephens (1990) — sequence logos for information-theoretic visualization of binding-site specificity"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/information-theory-genetics/b-shannon-entropy-genetic-information.yaml"
    },
    {
      "id": "b-language-evolution-cultural-selection",
      "title": "Language change obeys evolutionary dynamics — linguistic variants compete under frequency-dependent selection (prestige bias, conformity), the replicator equation governs variant frequencies, and historical linguistics is formally homologous to molecular phylogenetics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Languages change through processes that are mathematically equivalent to biological evolution: linguistic forms (words, constructions, pronunciations) are variants competing for use in a population of speakers. The analogy is not merely metaphorical — the mathematical formalisms are identical.\nRepli",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-language-change-replicator-conformity"
      ],
      "communication_gap": "Historical linguistics is one of the oldest scientific disciplines with a rigorous methodology (comparative method, 19th century). Population genetics developed in the 1920s-1930s independently. The formal mathematical homology was recognized in the 1990s-2000s but is still not standard in linguistics training. Linguists resist the \"language as organism\" metaphor (language does not have fitness in the same sense as organisms), sometimes blocking the more limited but valid claim that variant-frequency dynamics follow the same equations as allele-frequency dynamics.\n",
      "translation_table": [
        {
          "field_a_term": "linguistic variant (word, construction, pronunciation)",
          "field_b_term": "allele / biological replicator"
        },
        {
          "field_a_term": "speaker population",
          "field_b_term": "population of replicating individuals"
        },
        {
          "field_a_term": "language use frequency f_i",
          "field_b_term": "allele frequency p_i"
        },
        {
          "field_a_term": "prestige bias (copy high-status speakers)",
          "field_b_term": "directional natural selection"
        },
        {
          "field_a_term": "conformity bias (copy majority)",
          "field_b_term": "frequency-dependent stabilizing selection"
        },
        {
          "field_a_term": "neutral variant drift (random copying)",
          "field_b_term": "genetic drift (finite-population stochasticity)"
        },
        {
          "field_a_term": "language innovation (new word, borrowing)",
          "field_b_term": "mutation"
        },
        {
          "field_a_term": "language family tree (Indo-European, Austronesian)",
          "field_b_term": "phylogenetic gene tree / species tree"
        },
        {
          "field_a_term": "comparative method (reconstruct proto-language)",
          "field_b_term": "ancestral sequence reconstruction in phylogenetics"
        },
        {
          "field_a_term": "creolization (rapid simplification under selection)",
          "field_b_term": "adaptation under strong directional selection after bottleneck"
        },
        {
          "field_a_term": "Swadesh 100-word list (core vocabulary)",
          "field_b_term": "conserved ortholog genes used for phylogenetic reconstruction"
        }
      ],
      "references": [
        {
          "note": "Croft (2000) Explaining Language Change. Longman, London."
        },
        {
          "doi": "10.1126/science.291.5501.114",
          "note": "Nowak, Plotkin & Jansen (2001) Science 291:114 — evolution of universal grammar"
        },
        {
          "doi": "10.1023/A:1006601203931",
          "note": "Niyogi & Berwick (1997) Linguist Philos — mathematical model of language change"
        },
        {
          "doi": "10.1038/nature06137",
          "note": "Pagel, Atkinson & Meade (2007) Nature 449:717 — frequency law for word evolution"
        },
        {
          "doi": "10.1126/science.1219042",
          "note": "Michel et al. (2011) Science — quantitative analysis of culture via Google Ngrams (verb regularization)"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/linguistics-biology/b-language-evolution-cultural-selection.yaml"
    },
    {
      "id": "b-entropy-rate-x-language-model-perplexity",
      "title": "Stochastic process entropy rate h limits optimal prediction bits per symbol for stationary ergodic sources — connecting to cross-entropy training objectives for language models whose perplexity exp(H) measures geometric mean uncertainty per token under the model distribution versus empirical text statistics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Shannon–McMillan–Breiman asymptotic equipartition implies typical sequences carry ~nh bits per n symbols for ergodic processes with entropy rate h. Neural language models minimize average negative log likelihood — estimating cross-entropy against empirical token distributions; perplexity normalizes ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-entropy-rate-x-language-model-perplexity"
      ],
      "communication_gap": "NLP conferences cite perplexity operationally while omitting ergodic theory assumptions; information theory courses rarely demo transformer checkpoints despite natural pedagogical alignment.\n",
      "translation_table": [
        {
          "field_a_term": "Entropy rate h (bits per symbol)",
          "field_b_term": "Asymptotically optimal compressed bits for an n-token sequence, divided by n",
          "note": "Language modeling aspires to approach h from above using predictive distributions."
        },
        {
          "field_a_term": "Cross-entropy H(p,q) between true next-symbol p and model q",
          "field_b_term": "Average negative log loss minimized during transformer training",
          "note": "Empirical risk minimization targets cross-entropy which upper bounds unpredictability gap vs h."
        },
        {
          "field_a_term": "Perplexity exp(H)",
          "field_b_term": "Effective branching factor per token under model uncertainty",
          "note": "Same exponential rescaling used historically in speech recognition benchmarks."
        }
      ],
      "references": [
        {
          "doi": "10.1002/j.1538-7305.1948.tb01338.x",
          "note": "Shannon (1948) Bell Syst. Tech. J. — entropy and communication over discrete channels"
        },
        {
          "doi": "10.1162/coli.2005.31.4.417",
          "note": "Manning & Schütze, Foundations of Statistical Natural Language Processing — the statistical-NLP perplexity evaluation tradition"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/linguistics-information/b-entropy-rate-x-language-model-perplexity.yaml"
    },
    {
      "id": "b-zipf-optimal-coding",
      "title": "Zipf's law (word frequency proportional to 1/rank) is derivable from the principle of least effort — a communication system minimising joint speaker-listener effort converges on a power-law frequency distribution identical to Shannon's optimal coding theorem applied to natural language.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Zipf (1949) observed that the frequency of a word is inversely proportional to its rank in the frequency table: f(r) ∝ 1/r. This power law appears in word frequencies across all natural languages, city population sizes, income distributions, and protein interaction networks.\nMandelbrot (1953) derive",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-zipf-optimal-coding-universality",
        "h-zipf-critical-point-communication-efficiency"
      ],
      "communication_gap": "Zipf (1949) worked in linguistics and did not use information theory, which was formalised by Shannon the year before. Mandelbrot (1953) made the connection explicit but his work appeared in a biophysics journal and was not widely read by linguists or information theorists. The modern integration (Piantadosi et al. 2011) appears in PNAS and is known in cognitive science but not universally recognised by information theorists as a direct confirmation of optimal coding. The vocabulary mismatch — \"power law\", \"Zipf exponent\", \"rank-frequency plot\" in linguistics vs. \"source entropy\", \"code length\", \"mutual information\" in information theory — maintains the separation.\n",
      "translation_table": [
        {
          "field_a_term": "Word frequency rank (Zipf's law)",
          "field_b_term": "Probability of a symbol in a source distribution",
          "note": "High-rank words = low-frequency = low-probability symbols"
        },
        {
          "field_a_term": "Word length (syllables)",
          "field_b_term": "Codeword length (bits) in Shannon optimal code",
          "note": "Optimal coding: length ∝ −log₂(p); Zipf: length ∝ log(rank)"
        },
        {
          "field_a_term": "Speaker effort (minimise articulation)",
          "field_b_term": "Encoder compression (minimise bit length)",
          "note": "Both minimise the cost of transmission"
        },
        {
          "field_a_term": "Listener effort (minimise disambiguation)",
          "field_b_term": "Decoder reliability (maximise information per symbol)",
          "note": "Both maximise information transfer rate"
        },
        {
          "field_a_term": "Principle of least effort (Zipf 1949)",
          "field_b_term": "Source coding theorem (Shannon 1948)",
          "note": "The same optimisation principle stated in different vocabularies"
        },
        {
          "field_a_term": "Vocabulary size N",
          "field_b_term": "Alphabet size of the source",
          "note": "Both set the number of available codewords/words"
        }
      ],
      "references": [
        {
          "note": "Zipf (1949) Human Behavior and the Principle of Least Effort (Addison-Wesley)"
        },
        {
          "note": "Mandelbrot (1953) Bull Math Biophys 15:337 — derivation of Zipf from least-effort"
        },
        {
          "note": "Shannon (1948) Bell Syst Tech J 27:379 — source coding theorem"
        },
        {
          "doi": "10.1073/pnas.1012551108",
          "note": "Piantadosi, Tily & Gibson (2011) PNAS 108:3825 — word length determined by surprisal"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/linguistics-information/b-zipf-optimal-coding.yaml"
    },
    {
      "id": "b-formal-grammar-automata-theory",
      "title": "Chomsky's hierarchy of formal grammars (regular, context-free, context-sensitive, recursively enumerable) is isomorphic to a hierarchy of computational automata (finite state machines, pushdown automata, linear-bounded automata, Turing machines), and natural human language sits above context-free in the mildly context-sensitive class.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Chomsky (1956, 1959) identified a hierarchy of formal languages classified by the computational power required to generate or recognize them. The four levels and their automaton equivalences: — Type 3 (Regular): right-linear grammars → finite state automata (FSA).\n  Languages: a^n b^n is NOT regular",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-natural-language-mildly-context-sensitive-transformer-approximation"
      ],
      "communication_gap": "Chomsky's original hierarchy was developed in linguistics for theoretical syntax, but the computational automaton equivalences were worked out by computer scientists (Rabin & Scott 1959, Hopcroft & Ullman 1979). Linguists largely abandoned formal language theory after the \"cognitive revolution\" in favor of minimalist syntax; computer scientists use it for compiler theory. The empirical question of where natural language sits in the hierarchy — requiring careful corpus analysis and psycholinguistic experiments — falls between the two fields with no natural disciplinary home.\n",
      "translation_table": [
        {
          "field_a_term": "phrase structure grammar (CFG)",
          "field_b_term": "pushdown automaton (stack machine)",
          "note": "Every CFG has an equivalent PDA; CYK algorithm parses in O(n³)"
        },
        {
          "field_a_term": "regular expression (morphological rule)",
          "field_b_term": "finite state automaton (FSA)",
          "note": "English morphology (pluralization, past tense) is efficiently modeled as FSA"
        },
        {
          "field_a_term": "center-embedded relative clause",
          "field_b_term": "stack depth in pushdown automaton",
          "note": "Human working memory limits stack depth to ~2-3 levels empirically"
        },
        {
          "field_a_term": "tree-adjoining grammar (TAG)",
          "field_b_term": "mildly context-sensitive language (O(n⁶) parsing)",
          "note": "TAG captures cross-serial dependencies while remaining polynomially parseable"
        },
        {
          "field_a_term": "transformer attention head",
          "field_b_term": "approximate finite state transducer",
          "note": "Each attention head learns an approximate relation between positions — a soft FSA"
        },
        {
          "field_a_term": "V(D)J recombination in immunology (analogy)",
          "field_b_term": "stochastic context-free grammar for receptor generation",
          "note": "Immune receptor diversity modeled as SCFG; cross-domain connection to immunology"
        }
      ],
      "references": [
        {
          "doi": "10.1109/TIT.1956.1056813",
          "note": "Chomsky (1956) IRE Trans Inf Theory 2:113 — the original hierarchy paper"
        },
        {
          "note": "Joshi, Levy & Takahashi (1975) JACM 22:130 — tree-adjoining grammars and mildly context-sensitive languages"
        },
        {
          "note": "Hopcroft, Motwani & Ullman (2006) Introduction to Automata Theory, Languages, and Computation. Pearson."
        },
        {
          "note": "Manning & Schütze (1999) Foundations of Statistical Natural Language Processing. MIT Press."
        },
        {
          "doi": "10.1162/ling_a_00400",
          "note": "Shieber (1985) Linguistics & Philosophy — evidence that Swiss German is not context-free"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/linguistics-mathematics/b-formal-grammar-automata-theory.yaml"
    },
    {
      "id": "b-greenberg-universals-implicational-hierarchies",
      "title": "Greenberg's linguistic universals — cross-linguistic statistical regularities in word order, morphology, and phonology — are formalized mathematically as implicational hierarchies and lattice structures: if a language has property X it tends to have property Y, forming partial orders whose structure predicts typological distributions and constrains theories of grammar.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "An implicational universal has the form X → Y (not converse): e.g., if a language has VSO order then it has prepositions (but not vice versa). Over n binary typological features, the set of attested language types forms a strict subset of the 2^n logically possible combinations. The typological spac",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-processing-efficiency-explains-word-order-universals"
      ],
      "communication_gap": "Greenberg's empirical universals (1963) preceded formal mathematical treatment by decades; typologists, formal linguists (Optimality Theory), and statistical linguists use different mathematical frameworks for the same patterns; the explicit lattice-theoretic and Boolean algebra treatments of typological universals are known in formal semantics but rarely applied to Greenberg-style cross-linguistic statistics.\n",
      "translation_table": [
        {
          "field_a_term": "linguistic universal (linguistics)",
          "field_b_term": "implication X → Y in a logical / partial-order system (mathematics)",
          "note": "Universal is a conditional statement; its mathematical form is Boolean implication"
        },
        {
          "field_a_term": "typological distribution across languages (linguistics)",
          "field_b_term": "probability distribution on a lattice of type combinations (mathematics)",
          "note": "The attested language types cluster in non-random corners of the Boolean lattice"
        },
        {
          "field_a_term": "word-order harmonic typology (linguistics)",
          "field_b_term": "consistent linear extension of a partial order (mathematics)",
          "note": "Harmonic typologies are languages whose features form a consistent chain in the poset"
        },
        {
          "field_a_term": "Optimality Theory constraint ranking (linguistics)",
          "field_b_term": "total ordering on a constraint algebra (mathematics)",
          "note": "OT grammar is mathematically a ranked constraint lattice; typological universals constrain rankings"
        }
      ],
      "references": [
        {
          "doi": "10.1515/9783110861969-005",
          "note": "Greenberg (1963) - some universals of grammar with particular reference to the order of meaningful elements"
        },
        {
          "doi": "10.1353/lan.2011.0055",
          "note": "Cysouw (2003) - the paradigmatic structure of person marking (formal lattice analysis)"
        },
        {
          "doi": "10.1515/9783110170504",
          "note": "Comrie (1989) - Language Universals and Linguistic Typology (textbook reference)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/linguistics-mathematics/b-greenberg-universals-implicational-hierarchies.yaml"
    },
    {
      "id": "b-language-contact-x-graph-interpolation",
      "title": "Language contact spreads features across speaker networks and geography, naturally modeled as diffusion, interpolation, and graph dynamics on spatial social graphs.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Dialect geography represents distributions of variants across locations; contact zones show mixing and gradual transitions (isogloss bundles). Mathematically, if villages or speakers are nodes and interaction strengths are edges, lexical or phonetic adoption can resemble graph diffusion or opinion d",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-lexical-diffusion-on-geographic-graphs-predicts-isoglosses"
      ],
      "communication_gap": "Linguistic atlases are published as maps; network scientists publish diffusion on abstract graphs. Shared benchmarks with georeferenced corpora are emerging but not standard.",
      "translation_table": [
        {
          "field_a_term": "graph Laplacian diffusion",
          "field_b_term": "spatial spreading of lexical variants"
        },
        {
          "field_a_term": "interpolation kernel on a graph",
          "field_b_term": "imputation between sampled dialect survey points"
        },
        {
          "field_a_term": "bottleneck edges",
          "field_b_term": "terrain or political barriers shaping isoglosses"
        }
      ],
      "references": [
        {
          "doi": "10.1353/lan.2004.0058",
          "note": "Representative quantitative dialectometry / spatial statistics bridge (journal article cluster)"
        },
        {
          "doi": "10.1073/pnas.1423852112",
          "note": "Modern data-driven language geography / population structure (illustrative network diffusion flavor)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/linguistics-mathematics/b-language-contact-x-graph-interpolation.yaml"
    },
    {
      "id": "b-language-biomarker-diagnosis",
      "title": "Computational linguistics measures of syntactic complexity, semantic coherence, and speech-rate variability serve as non-invasive biomarkers of neural health — detecting Alzheimer's disease, depression, and psychotic-spectrum formal thought disorder years before clinical presentation.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Language production requires the coordinated activity of prefrontal working memory, temporal lobe semantic networks, basal ganglia procedural systems, and cerebellar timing circuits. Pathology in any of these systems leaves quantifiable signatures in the structure of speech and text that are detecta",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-glymphatic-amyloid-clearance-rate",
        "h-linguistic-relativity-neural-boundary"
      ],
      "communication_gap": "Computational linguistics and clinical medicine communicate minimally: NLP researchers publish in ACL/EMNLP/NAACL; clinicians publish in JAMA, Lancet, Brain. The DementiaBank and similar datasets exist but are primarily used by NLP researchers with limited clinical collaboration. Clinical adoption requires regulatory approval, integration with EHR systems, and evidence from randomized trials — none of which NLP researchers are equipped to pursue alone. Psychiatry has been slow to adopt quantitative speech measures despite decades of research on formal thought disorder, in part because the measures require computational tools not standard in clinical practice.\n",
      "translation_table": [
        {
          "field_a_term": "type-token ratio (lexical richness)",
          "field_b_term": "semantic memory retrieval efficiency; declines with temporal lobe neurodegeneration"
        },
        {
          "field_a_term": "syntactic embedding depth (parse tree depth)",
          "field_b_term": "prefrontal working memory capacity; reduced in Alzheimer's and schizophrenia"
        },
        {
          "field_a_term": "sentence-to-sentence cosine similarity (semantic coherence)",
          "field_b_term": "associative looseness / formal thought disorder severity; low in psychosis"
        },
        {
          "field_a_term": "speech rate (syllables/second)",
          "field_b_term": "psychomotor speed; reduced in depression, elevated in mania"
        },
        {
          "field_a_term": "first-person singular pronoun frequency",
          "field_b_term": "depressive self-focus; anhedonic self-referential processing"
        },
        {
          "field_a_term": "semantic network clustering coefficient in speech graph",
          "field_b_term": "associative coherence; reduced in formal thought disorder, elevated in OCD"
        }
      ],
      "references": [
        {
          "doi": "10.1016/j.cortex.2005.09.001",
          "note": "Garrard et al. (2005) — lexical and semantic changes in Alzheimer's using Iris Murdoch's novels"
        },
        {
          "doi": "10.1038/s41386-019-0534-2",
          "note": "Corcoran et al. (2018) — NLP predicts psychosis conversion from baseline speech samples"
        },
        {
          "doi": "10.1016/j.bspc.2017.12.013",
          "note": "Cummins et al. (2015) — review: speech analysis for depression; features and models"
        },
        {
          "arxiv": "1907.07061",
          "note": "Petti et al. (2020) — systematic review of NLP for Alzheimer's detection from speech"
        },
        {
          "doi": "10.1093/schbul/sbx105",
          "note": "Elvevåg et al. — automated analysis of free speech predicts psychosis risk"
        },
        {
          "doi": "10.1101/2020.07.23.20160416",
          "note": "Fraser et al. — linguistic features identify Alzheimer's disease in connected speech samples"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/linguistics-medicine/b-language-biomarker-diagnosis.yaml"
    },
    {
      "id": "b-birdsong-syntax-formal-language-theory",
      "title": "Birdsong exhibits hierarchical combinatorial syntax that maps onto the Chomsky hierarchy of formal languages: simple species generate finite-state (regular) sequences while complex learners such as Bengalese finches produce context-free dependencies, providing a non-human animal test bed for formal language theory",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The sequential structure of birdsong syllables can be described by a finite-state automaton (regular grammar, Chomsky Type 3) in species like canaries, but Bengalese finch songs require context-free grammars (Type 2) with non-local dependencies between motif branches; the Chomsky hierarchy thus part",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-birdsong-context-free-grammar-test"
      ],
      "communication_gap": "Ornithologists studying song learning focus on neural plasticity and vocal motor control while linguists study abstract grammatical competence; the formal language theory framing was proposed by Berwick et al. (2011) but remains contested, partly because behavioral testing for context-free dependencies requires carefully controlled playback experiments.",
      "translation_table": [
        {
          "field_a_term": "birdsong syllable transition probabilities (ornithology)",
          "field_b_term": "finite-state automaton transitions (formal language theory)",
          "note": "Markov syllable-to-syllable transitions are captured exactly by a probabilistic finite automaton"
        },
        {
          "field_a_term": "branching song motifs with non-local dependencies (ornithology)",
          "field_b_term": "context-free grammar productions with stack memory (linguistics)",
          "note": "Center-embedded motif dependencies require pushdown stack beyond regular-grammar power"
        },
        {
          "field_a_term": "HVC premotor sequence circuit (ornithology)",
          "field_b_term": "state-transition network of a finite automaton (linguistics)",
          "note": "HVC chain-like neuron sequences implement the timing and order of a finite-state machine"
        },
        {
          "field_a_term": "song learning critical period (ornithology)",
          "field_b_term": "language acquisition device / sensitive period (linguistics)",
          "note": "Both birdsong and human language acquisition are governed by a sensitive period for auditory-motor learning"
        }
      ],
      "references": [
        {
          "doi": "10.1016/j.tics.2011.01.001",
          "note": "Berwick et al. (2011) - songs to syntax: the linguistics of birdsong"
        },
        {
          "doi": "10.1038/nn1153",
          "note": "Okanoya (2004) - the Bengalese finch: a window on the behavioral neurobiology of birdsong syntax"
        },
        {
          "doi": "10.1371/journal.pone.0003842",
          "note": "Gentner et al. (2006) - recursive syntactic pattern learning by songbirds (starling context-free test)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/linguistics-ornithology/b-birdsong-syntax-formal-language-theory.yaml"
    },
    {
      "id": "b-linguistic-relativity-quantum-basis",
      "title": "Linguistic relativity (Sapir-Whorf) and quantum measurement basis choice both reveal how the observer's representational framework determines what aspects of an underdetermined reality become definite.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Linguistic relativity holds that the language one speaks shapes what aspects of perceptual reality are discriminated and categorised. Quantum measurement theory holds that the choice of measurement basis determines which quantum property becomes definite upon measurement. In both cases, an observer'",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Linguists studying Sapir-Whorf work in cognitive science and anthropology journals; quantum foundations researchers publish in physics and philosophy of physics. Neither reads the other. The connection has been noted informally but never formalised.\n",
      "translation_table": [
        {
          "field_a_term": "lexical category boundary (e.g., blue/green distinction)",
          "field_b_term": "measurement basis (e.g., x-spin vs z-spin)",
          "note": "The representational choice that determines what contrast becomes perceptible/definite"
        },
        {
          "field_a_term": "categorical perception effect",
          "field_b_term": "projection postulate (state collapses to eigenstate)",
          "note": "The act of applying a category sharpens the representation of what was diffuse"
        },
        {
          "field_a_term": "linguistic relativity effect size",
          "field_b_term": "basis rotation angle (degree of incompatible measurement)",
          "note": "How different the two frameworks are determines how different the induced realities are"
        },
        {
          "field_a_term": "cross-linguistic universals (color focal points)",
          "field_b_term": "preferred measurement bases (eigenstates of Hamiltonian)",
          "note": "Some representations are more natural/stable than others regardless of observer choice"
        }
      ],
      "references": [],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/linguistics-physics/b-linguistic-relativity-quantum-basis.yaml"
    },
    {
      "id": "b-coral-symbiosis-mutualism-stability-theory",
      "title": "Coral-zooxanthellae symbiosis is a model mutualism whose stability is analyzed using ecological mutualism theory: partner fidelity feedback, sanctions mechanisms, and the optimal foraging trade-off between carbon provision and nitrogen limitation determine when the partnership is evolutionarily stable versus prone to cheating or bleaching.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "In mutualism stability theory, a partnership is evolutionarily stable if the fitness cost c of providing benefits satisfies c < b·r where b is partner benefit and r is relatedness (Hamilton's rule extended to mutualists). For coral-algae systems, the zooxanthellae (Symbiodiniaceae) translocate 90-95",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-thermotolerant-symbiodinium-rescue-reef-systems-climate-change"
      ],
      "communication_gap": "Marine biologists studying reef coral bleaching and theoretical ecologists studying mutualism stability share overlapping models but rarely apply formal stability theory to coral systems; the explicit translation between coral physiology and mutualism game theory was developed in the 2000s but remains underutilized in reef conservation planning and climate impact predictions.\n",
      "translation_table": [
        {
          "field_a_term": "coral bleaching (marine biology)",
          "field_b_term": "collapse of mutualism stability / partner defection (ecology)",
          "note": "Thermal stress breaks the stability condition, causing host to expel the mutualistic symbiont"
        },
        {
          "field_a_term": "zooxanthellae photosynthate translocation (marine biology)",
          "field_b_term": "partner benefit delivery in biological market (ecology)",
          "note": "90%+ translocation rate is the \"wage\" paid by algae; cost to algae must be < fitness benefit"
        },
        {
          "field_a_term": "coral nutrient provisioning (marine biology)",
          "field_b_term": "reciprocal benefit exchange / tit-for-tat (ecology)",
          "note": "Coral provides DIC, N, P to zooxanthellae, creating the reciprocal benefit that stabilizes mutualism"
        },
        {
          "field_a_term": "Symbiodiniaceae clade switching (marine biology)",
          "field_b_term": "partner choice / market competition (ecology)",
          "note": "Corals can shuffle to more thermally tolerant symbionts, analogous to market switching to better partners"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.aah6716",
          "note": "Hughes et al. (2017) - global warming and recurrent mass bleaching of corals"
        },
        {
          "doi": "10.1073/pnas.0307246101",
          "note": "Sachs & Simms (2006) - mutualism and partner choice mechanisms in symbiosis"
        },
        {
          "doi": "10.1016/j.tree.2010.10.001",
          "note": "Hoeksema & Schwartz (2003) - mutualism stability theory in plant-mycorrhizal systems (applicable framework)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/marine-biology-ecology/b-coral-symbiosis-mutualism-stability-theory.yaml"
    },
    {
      "id": "b-fish-schooling-collective-motion",
      "title": "Fish schooling and bird flocking are active matter phase transitions — the Vicsek model shows that self-propelled particles aligning with neighbors undergo a continuous order-disorder transition at a critical noise threshold, exhibiting long-range order in 2D forbidden by the Mermin-Wagner theorem for equilibrium systems.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Fish schools (up to 10⁶ individuals), bird flocks (murmurations of starlings), and insect swarms exhibit coherent collective motion emerging from local interaction rules without central coordination. This is a paradigmatic active matter problem.\nThe Vicsek model (1995) is the minimal model: N point ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-topological-flocking-predator-evasion"
      ],
      "communication_gap": "Marine biologists study fish schooling as an anti-predator behavior and hydrodynamic efficiency problem; statistical physicists study it as an active matter phase transition; fluid dynamicists study it as a CFD problem in collective swimming. The three communities use different formalisms and rarely cite each other. Empirical data on 3D fish tracking at sufficient density to extract interaction rules became available only recently (stereophotogrammetry, computer vision) — enabling comparison with theoretical predictions that have existed since 1995.\n",
      "translation_table": [
        {
          "field_a_term": "individual fish / bird (self-propelled particle)",
          "field_b_term": "Vicsek particle with constant speed v₀"
        },
        {
          "field_a_term": "visual alignment with neighbors",
          "field_b_term": "ferromagnetic-like alignment interaction with range r"
        },
        {
          "field_a_term": "unpredictable predator approach / environmental perturbation",
          "field_b_term": "noise parameter η in Vicsek model"
        },
        {
          "field_a_term": "critical noise threshold (disorder/order transition)",
          "field_b_term": "continuous phase transition in Vicsek model at η_c"
        },
        {
          "field_a_term": "coherent school / murmuration (all moving together)",
          "field_b_term": "ordered flocking phase (Φ → 1)"
        },
        {
          "field_a_term": "dispersed school (panic, fragmentation)",
          "field_b_term": "disordered phase (Φ → 0)"
        },
        {
          "field_a_term": "topological interaction (k = 7 nearest neighbors)",
          "field_b_term": "Voronoi-cell based interaction (graph topology, not metric disk)"
        },
        {
          "field_a_term": "giant number fluctuations in fish school density",
          "field_b_term": "ΔN ~ N^{0.8} anomalous fluctuations in Toner-Tu theory"
        },
        {
          "field_a_term": "hydrodynamic schooling (lateral line, vortex wakes)",
          "field_b_term": "far-field hydrodynamic interactions between active swimmers"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.75.1226",
          "note": "Vicsek et al. (1995) Phys Rev Lett 75:1226 — original Vicsek model of collective motion"
        },
        {
          "doi": "10.1103/PhysRevLett.75.4326",
          "note": "Toner & Tu (1995) Phys Rev Lett 75:4326 — hydrodynamic theory, long-range order in 2D active matter"
        },
        {
          "doi": "10.1006/jtbi.2002.3065",
          "note": "Couzin et al. (2002) J Theor Biol 218:1 — individual-based model of fish school transitions"
        },
        {
          "doi": "10.1073/pnas.0711437105",
          "note": "Ballerini et al. (2008) PNAS 105:1232 — topological interaction in starling flocks (k ≈ 7)"
        },
        {
          "doi": "10.1126/science.1088295",
          "note": "Liao et al. (2003) Science — fish exploit Kármán vortex street (hydrodynamic schooling benefit)"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/marine-biology-fluid-dynamics/b-fish-schooling-collective-motion.yaml"
    },
    {
      "id": "b-antifreeze-proteins-ice-crystal",
      "title": "Antifreeze proteins (AFPs) modify ice crystal habit and inhibit recrystallization by adsorbing to specific ice crystal planes via hydrogen-bond and hydrophobic complementarity, quantified by the Kelvin effect: AFP adsorption on a crystal surface of radius of curvature r raises the local melting point depression ΔT = 2σ*V_m / (ΔH_f * r), creating a thermal hysteresis gap between freezing and melting points",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "AFPs inhibit ice growth by a nanoscale Kelvin effect: AFP molecules adsorb onto specific ice prism, basal, or pyramidal planes through complementary hydrogen-bonding arrays matched to the ice lattice spacing (2.67–4.5 Å), pinning step advancement; between AFP adsorption sites, ice must grow on a cur",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Biochemists study AFP structure-function relationships and freeze protection in organisms while materials scientists study ice crystal growth and cryopreservation; the Kelvin effect quantification of AFP activity is known in biophysics but the materials science framework for ice crystal habit modification is not broadly integrated with protein engineering efforts.",
      "translation_table": [
        {
          "field_a_term": "antifreeze protein ice-binding surface (biophysics)",
          "field_b_term": "crystallographic lattice-matching template on ice crystal face (materials science)",
          "note": "AFP ice-binding site has hydrogen-bond donors/acceptors spaced to match OH rows of ice Ih at 2.67, 4.50 Å"
        },
        {
          "field_a_term": "thermal hysteresis gap ΔT between freezing and melting (biophysics)",
          "field_b_term": "Kelvin / Gibbs-Thomson curvature undercooling ΔT = 2σT_m*V_m/(ΔH_f*r) (materials science)",
          "note": "AFP pins surface steps so ice must advance as curved front; increased curvature energy inhibits growth"
        },
        {
          "field_a_term": "ice recrystallization inhibition (biophysics)",
          "field_b_term": "Ostwald ripening suppression by grain boundary adsorption (materials science)",
          "note": "AFPs adsorbing on grain boundaries slow coarsening exactly as grain boundary solutes do in metallurgy"
        },
        {
          "field_a_term": "AFP isoform diversity: Type I, II, III, IV, AFPGL (biophysics)",
          "field_b_term": "different crystal plane selectivities and contact-angle distributions (materials science)",
          "note": "Each AFP type targets specific ice crystal faces, producing organism-specific ice crystal morphologies"
        }
      ],
      "references": [
        {
          "doi": "10.1038/ng0905-908",
          "note": "Davies & Hew (1990) - biochemistry of antifreeze proteins: review of structure and function"
        },
        {
          "doi": "10.1073/pnas.96.8.4282",
          "note": "Haymet et al. (1999) - winter flounder antifreeze protein and ice lattice matching"
        },
        {
          "doi": "10.1016/j.bpj.2011.05.070",
          "note": "Garnham et al. (2011) - hyperactive antifreeze protein structure and Kelvin mechanism"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/materials-science-biology/b-antifreeze-proteins-ice-crystal.yaml"
    },
    {
      "id": "b-biomineralization-crystal-growth",
      "title": "Biomineralization (bone, shell, tooth formation) obeys the same nucleation and crystal-growth kinetics as inorganic mineralogy — organisms exploit organic templates (proteins, polysaccharides) to control crystal habit, orientation, and polymorph selection, while Ostwald ripening, spinodal decomposition, and Lifshitz-Slyozov-Wagner kinetics govern both biological and synthetic mineral growth.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Classical nucleation theory (CNT) describes the competition between bulk free energy gain and surface energy penalty when a nucleus forms from a supersaturated solution: ΔG = -n·Δμ + γ·A, giving a critical radius r* = 2γ·v_m / (kT·ln S), where S is supersaturation and γ the interfacial energy. Livin",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-organic-template-polymorph-selection"
      ],
      "communication_gap": "Materials scientists publish in Acta Materialia, Crystal Growth & Design, and J Crystal Growth; structural biologists in J Biol Chem, PNAS, and eLife. The Weiner and Addadi group (Weizmann) has bridged these communities, but most biomineralisation papers still describe crystal control qualitatively (\"the protein inhibits calcite growth\") without computing γ reductions or nucleation barriers quantitatively — the language materials scientists require.\n",
      "translation_table": [
        {
          "field_a_term": "interfacial energy γ (materials science)",
          "field_b_term": "protein–mineral binding free energy (biomineralisation)",
          "note": "Both control the nucleation barrier height"
        },
        {
          "field_a_term": "epitaxial substrate (thin-film deposition)",
          "field_b_term": "organic matrix / collagen template",
          "note": "Periodic template imposes crystal orientation in both cases"
        },
        {
          "field_a_term": "Ostwald ripening / LSW coarsening (r̄ ∝ t^{1/3})",
          "field_b_term": "bone remodelling coarsening (suppressed by osteopontin)"
        },
        {
          "field_a_term": "spinodal decomposition (Cahn-Hilliard)",
          "field_b_term": "amorphous precursor pathway in sea-urchin spicule formation"
        },
        {
          "field_a_term": "polymorph selection by additive (anti-scalant chemistry)",
          "field_b_term": "polymorph selection by acidic shell proteins (calcite vs aragonite)"
        },
        {
          "field_a_term": "critical nucleus radius r* (CNT)",
          "field_b_term": "minimum HAp crystal size tolerated by osteoblasts during bone formation"
        }
      ],
      "references": [
        {
          "url": "https://global.oup.com/academic/product/on-biomineralization-9780195049695",
          "note": "Lowenstam & Weiner (1989) On Biomineralization — Oxford University Press, foundational text"
        },
        {
          "doi": "10.1039/a608571e",
          "note": "Weiner & Addadi (1997) Design strategies in mineralised biological materials, J Mater Chem 7:689"
        },
        {
          "doi": "10.1016/0001-6160(59)90170-1",
          "note": "Cahn (1959) Free energy of a nonuniform system, Acta Metallurgica — spinodal decomposition theory"
        },
        {
          "doi": "10.1126/science.1231819",
          "note": "Gal et al. (2013) Calcite crystal growth by a solid-state transformation of stabilised ACC nanospheres, Science"
        },
        {
          "doi": "10.1107/S0108767396012050",
          "note": "Mann (1993) Molecular tectonics in biomineralization and biomimetic materials chemistry"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/materials-science-biology/b-biomineralization-crystal-growth.yaml"
    },
    {
      "id": "b-biomineralization-crystal-nucleation",
      "title": "Organisms direct calcium carbonate, calcium phosphate, and silica crystal nucleation through organic templates and protein matrices that lower the nucleation barrier (ΔG*) — effectively tuning the classical nucleation theory landscape — to produce hierarchically structured biominerals with mechanical properties inaccessible to inorganic synthesis alone.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Classical nucleation theory gives the free energy barrier ΔG* = 16πγ³/(3ΔG_v²), where γ is the solid–liquid interfacial energy and ΔG_v is the volumetric free energy of crystallization. The nucleation rate J ∝ exp(-ΔG*/kT) is exquisitely sensitive to γ: reducing γ by a factor of 2 reduces ΔG* by a f",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-organic-template-lowers-nucleation-barrier"
      ],
      "communication_gap": "Classical nucleation theory is taught in materials science and chemical engineering but rarely in biology. Biomineralization research is published in specialist journals (J Struct Biol, Biomaterials) that materials scientists rarely read. The quantitative connection between protein matrix chemistry and the CNT parameters γ and ΔG_v has been made in a handful of studies but has not become the default analytical framework for the biomineralization field.\n",
      "translation_table": [
        {
          "field_a_term": "interfacial free energy γ (crystal-liquid surface energy)",
          "field_b_term": "organic template–mineral interface energy (reduced by protein binding)",
          "note": "protein matrices lower γ by 30–60% enabling nucleation at lower supersaturation"
        },
        {
          "field_a_term": "nucleation barrier ΔG* = 16πγ³/(3ΔG_v²)",
          "field_b_term": "threshold for crystal nucleation at biological templating sites",
          "note": "ΔG* controls nucleation rate exponentially; small γ reductions have large rate effects"
        },
        {
          "field_a_term": "polymorph selection (aragonite vs. calcite)",
          "field_b_term": "template-directed crystal phase control in mollusc shell formation",
          "note": "different protein matrices select different CaCO₃ polymorphs with different mechanical properties"
        },
        {
          "field_a_term": "Ostwald ripening (large crystals grow at expense of small ones)",
          "field_b_term": "intracellular amorphous calcium carbonate precursor storage and conversion",
          "note": "biology avoids Ostwald ripening by spatial confinement within vesicles"
        },
        {
          "field_a_term": "crack-tip stress intensity factor K_Ic (fracture toughness)",
          "field_b_term": "nacre brick-and-mortar architecture toughening mechanism",
          "note": ">3000x toughness improvement from hierarchical crack deflection vs. monolithic crystal"
        },
        {
          "field_a_term": "epitaxy (crystal growth on matched substrate)",
          "field_b_term": "stereochemical recognition between protein matrix and crystal face"
        }
      ],
      "references": [
        {
          "note": "Lowenstam & Weiner (1989) — On Biomineralization; Oxford University Press"
        },
        {
          "doi": "10.1039/a703590a",
          "note": "Weiner & Addadi (1997) — Design strategies in mineralized biological materials; J Mater Chem 7:689"
        },
        {
          "doi": "10.1021/cr8002049",
          "note": "Meldrum & Cölfen (2008) — Controlling mineral morphologies and structures in biological and synthetic systems; Chem Rev 108:4332"
        },
        {
          "doi": "10.1016/j.pmatsci.2008.03.001",
          "note": "Meyers et al. (2008) — Biological materials: structure and mechanical properties; Prog Mater Sci 53:1"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/materials-science-biology/b-biomineralization-crystal-nucleation.yaml"
    },
    {
      "id": "b-gecko-adhesion-van-der-waals",
      "title": "Gecko adhesion arises from millions of nanoscale setae generating ~10nN van der Waals (dispersion) forces per spatula, with total adhesion (~20N) modeled by JKR contact mechanics (F = 3πwR/2), producing direction-dependent anisotropic and self-cleaning dry adhesion — connecting condensed matter physics (van der Waals interactions) to materials engineering and bio-inspired synthetic adhesives.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Gecko feet contain ~10^9 keratinous setae (100 μm long, 5 μm diameter) each branching into ~100-1000 spatulae (~200 nm wide, 20 nm thick). Each spatula generates adhesion via van der Waals (London dispersion) forces: F ~ 10nN per spatula. Total foot adhesion ~20N, sufficient to support the gecko's w",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "The gecko adhesion mechanism was debated for 150 years (capillary, suction, microlock hypotheses). Autumn et al. (2000, 2002) definitively established van der Waals mechanism using atomic force microscopy and contact angle measurements on single setae — requiring both biology (gecko handling) and physics (surface forces measurement). Materials scientists developed JKR theory (1971) and synthetic adhesive design completely independently of gecko biology; Geim's nanotube adhesive paper (2003) was the first to explicitly apply gecko knowledge to materials engineering.\n",
      "translation_table": [
        {
          "field_a_term": "van der Waals (London dispersion) force per spatula ~10nN",
          "field_b_term": "Hamaker constant A (material-specific van der Waals interaction energy parameter)",
          "note": "F_vdW = A·R/(6d²) per JKR; Hamaker constant for keratin-silica ~10^-19 J"
        },
        {
          "field_a_term": "JKR pull-off force F = (3/2)πwR",
          "field_b_term": "work of adhesion w = γ₁ + γ₂ - γ₁₂ (surface energies)",
          "note": "JKR theory derived from elasticity + surface energetics; w determines adhesion per contact area"
        },
        {
          "field_a_term": "anisotropic adhesion (strong distally, weak proximally)",
          "field_b_term": "geometric coupling between drag direction and contact area",
          "note": "setae angle ~45° creates direction-dependent detachment — a structural amplification of vdW forces"
        },
        {
          "field_a_term": "self-cleaning (particles transfer from setae to substrate)",
          "field_b_term": "thermodynamic wettability (particle-substrate adhesion > particle-seta adhesion)",
          "note": "Gecko does not have a cleaning mechanism; self-cleaning is a passive thermodynamic consequence"
        }
      ],
      "references": [
        {
          "doi": "10.1038/35015073",
          "note": "Autumn et al. (2000) — Adhesive force of a single gecko foot-hair; Nature 405:681"
        },
        {
          "doi": "10.1073/pnas.192252799",
          "note": "Autumn et al. (2002) — Evidence for van der Waals adhesion in gecko setae; PNAS 99:12252"
        },
        {
          "doi": "10.1098/rspa.1971.0141",
          "note": "Johnson, Kendall & Roberts (1971) — Surface energy and contact of elastic solids; Proc R Soc A 324:301"
        },
        {
          "doi": "10.1038/nmat762",
          "note": "Geim et al. (2003) — Microfabricated adhesive mimicking gecko foot-hair; Nat Mater 2:461"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/materials-science-biology/b-gecko-adhesion-van-der-waals.yaml"
    },
    {
      "id": "b-phase-diagrams-alloy-design",
      "title": "Binary and multi-component alloy phase diagrams are thermodynamic predictions of Gibbs free energy minimization — CALPHAD parameterizes G(T,x) from sublattice models, and high-entropy alloys exploit large configurational entropy ΔS_mix = −R Σxᵢ ln(xᵢ) to stabilize single-phase solid solutions.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Phase diagrams are maps of thermodynamic equilibrium: for a given composition and temperature, which phase (or mixture of phases) minimizes the total Gibbs free energy G = H − TS? The phase boundary locations follow from the common tangent construction on G(x) curves. This bridges thermodynamic chem",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-high-entropy-alloy-configurational-entropy-stabilization"
      ],
      "communication_gap": "CALPHAD was developed by metallurgists (Kaufman, Hillert, Pelton) and is taught primarily in materials science and metallurgical engineering programs. Chemists studying solution thermodynamics and physicists studying phase transitions use the same Gibbs free energy formalism but different parameterizations. The HEA field emerged partly because materials scientists recognized entropy arguments from statistical thermodynamics — a cross-fertilization that happened within materials science but is still not widely known in physical chemistry departments.\n",
      "translation_table": [
        {
          "field_a_term": "Gibbs free energy G = H − TS",
          "field_b_term": "phase stability criterion — phase with lowest G is stable",
          "note": "Phase boundaries are loci where ΔG = 0 between two competing phases"
        },
        {
          "field_a_term": "common tangent construction",
          "field_b_term": "phase coexistence rule (two-phase region in phase diagram)",
          "note": "Tie lines connect phases in equilibrium at the same chemical potential"
        },
        {
          "field_a_term": "Hume-Rothery size rule (<15% radius difference)",
          "field_b_term": "elastic strain energy ΔH_elastic from size mismatch",
          "note": "Derived from continuum elasticity theory for substitutional defect"
        },
        {
          "field_a_term": "CALPHAD Redlich-Kister interaction parameter L_k",
          "field_b_term": "excess Gibbs energy of mixing (deviation from ideal solution)",
          "note": "Fitted to binary experiments; enables ternary/quaternary predictions"
        },
        {
          "field_a_term": "configurational entropy ΔS_mix = −R Σxᵢ ln(xᵢ)",
          "field_b_term": "HEA stabilization: −TΔS_mix lowers G of solid solution",
          "note": "At high T, entropy term dominates; at low T, enthalpy terms may drive phase separation"
        },
        {
          "field_a_term": "martensite transformation",
          "field_b_term": "diffusionless first-order phase transition (military transformation)",
          "note": "Shear-dominated; Bain correspondence relates parent and product crystal structures"
        }
      ],
      "references": [
        {
          "note": "Hume-Rothery (1926) J Inst Met 35:295 — empirical rules for solid solution formation"
        },
        {
          "note": "Saunders & Miodownik (1998) CALPHAD: A Comprehensive Guide. Pergamon Press."
        },
        {
          "doi": "10.1016/j.msea.2003.10.257",
          "note": "Cantor et al. (2004) Mater Sci Eng A 375:213 — first report of CrMnFeCoNi HEA"
        },
        {
          "doi": "10.1038/s41578-019-0121-4",
          "note": "George et al. (2019) Nat Rev Mater 4:515 — high-entropy alloys review"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/materials-science-chemistry/b-phase-diagrams-alloy-design.yaml"
    },
    {
      "id": "b-fracture-mechanics-griffith",
      "title": "Griffith's fracture criterion bridges atomic surface energy (materials science) and macroscopic structural failure (engineering) by equating the elastic strain energy release rate to the cost of creating new crack surfaces.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Griffith (1921) derived the critical stress for crack propagation: σ_f = √(2Eγ/πa), where E is Young's modulus, γ is specific surface energy, and a is half-crack length. This equates the macroscopic (continuum mechanics) energy release rate G = πσ²a/E to the microscopic (atomic) surface creation cos",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-griffith-crack-2d-material-defects"
      ],
      "communication_gap": "Griffith's original paper went largely unread for 30 years because it appeared in a pure mathematics journal. The Irwin-Orowan reformulation brought it to engineers. Today materials scientists measure γ without connecting it to K_Ic, and engineers use K_Ic without knowing the atomic origin — the bridge between scales is known but not routinely taught as a single coherent framework.\n",
      "translation_table": [
        {
          "field_a_term": "specific surface energy γ (J/m²)",
          "field_b_term": "material's resistance to crack initiation (materials science)",
          "note": "Atomic bonding energy per unit area; varies by crystallography and environment"
        },
        {
          "field_a_term": "stress intensity factor K_I",
          "field_b_term": "loading severity at crack tip (engineering)",
          "note": "Combines applied stress and crack geometry; universal across geometries at same K"
        },
        {
          "field_a_term": "fracture toughness K_Ic (MPa·m^0.5)",
          "field_b_term": "critical K_I at fracture; material property",
          "note": "The bridge quantity: determined by materials science, used directly in engineering design"
        },
        {
          "field_a_term": "Paris law exponent m",
          "field_b_term": "fatigue crack growth rate per load cycle",
          "note": "Empirical law; m=2-4 for metals; set by microstructure and crack-tip plasticity"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rsta.1921.0006",
          "note": "Griffith, A.A. (1921). The phenomena of rupture and flow in solids. Phil Trans R Soc A 221:163."
        },
        {
          "note": "Irwin, G.R. (1957). Analysis of stresses and strains near the end of a crack traversing a plate. J Appl Mech 24:361."
        },
        {
          "doi": "10.1115/1.3656900",
          "note": "Paris, P. & Erdogan, F. (1963). A critical analysis of crack propagation laws. J Basic Eng 85:528."
        },
        {
          "note": "Anderson, T.L. (2017). Fracture Mechanics: Fundamentals and Applications (4th ed.). CRC Press."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/materials-science-engineering/b-fracture-mechanics-griffith.yaml"
    },
    {
      "id": "b-active-learning-x-bayesian-optimization-alloy-discovery",
      "title": "Active learning with Bayesian optimization bridges sample-efficient acquisition and experimental alloy discovery loops.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy (to be empirically validated): Bayesian-optimization acquisition policies can function as adaptive design rules analogous to sequential alloy-screening heuristics in autonomous materials labs.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-active-learning-bayesian-optimization-improves-alloy-hit-rate"
      ],
      "communication_gap": "Domain operators prioritize interpretable reliability diagnostics, while ML work often prioritizes aggregate accuracy without deployment-grade uncertainty audits.",
      "translation_table": [
        {
          "field_a_term": "model prior",
          "field_b_term": "domain prior",
          "note": "Both constrain inference in data-sparse regimes."
        },
        {
          "field_a_term": "uncertainty estimate",
          "field_b_term": "risk-aware decision support",
          "note": "Uncertainty quality determines practical utility."
        },
        {
          "field_a_term": "out-of-distribution behavior",
          "field_b_term": "deployment robustness",
          "note": "Shift sensitivity governs real-world reliability."
        }
      ],
      "references": [
        {
          "url": "https://www.nature.com/articles/s41524-019-0153-6",
          "note": "Bayesian optimization for materials design."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/materials-science-machine-learning/b-active-learning-x-bayesian-optimization-alloy-discovery.yaml"
    },
    {
      "id": "b-crystallography-group-theory",
      "title": "The 230 space groups classifying all possible crystal symmetries are a complete enumeration of discrete subgroups of the Euclidean group in 3D; quasicrystals (Shechtman 1984) require the mathematics of aperiodic tilings, extending the connection to non-crystallographic point groups.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Every crystal is characterised by its space group — one of exactly 230 discrete subgroups of the Euclidean group E(3) in three dimensions. This is a theorem of mathematics (proved independently by Fedorov 1891, Schoenflies 1891, and Barlow 1894) and is the foundation of all crystallography: diffract",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-crystallographic-protein-folding"
      ],
      "communication_gap": "Crystallographers learn the 230 space groups as empirical tables, not as theorems of group theory. Mathematicians studying discrete subgroups of Euclidean groups rarely read materials science journals. The physical chemist learning crystal field theory, the condensed matter physicist deriving band structures, and the pure mathematician studying lattice groups are all working with the same mathematical objects but rarely interact. Penrose's original work on aperiodic tilings appeared in a mathematics journal (Bulletin of the Institute of Mathematics and its Applications) and was unknown to materials scientists until Shechtman's experimental discovery.\n",
      "translation_table": [
        {
          "field_a_term": "Crystal lattice (periodic arrangement of atoms)",
          "field_b_term": "Discrete subgroup of the Euclidean group E(3)",
          "note": "Bravais lattice = translational subgroup; point group = rotational subgroup"
        },
        {
          "field_a_term": "Space group (230 types)",
          "field_b_term": "Discrete crystallographic group in 3D",
          "note": "Complete enumeration by Schoenflies (1891) and Fedorov (1891)"
        },
        {
          "field_a_term": "Neumann's principle (crystal properties reflect symmetry)",
          "field_b_term": "Representation theory: physical observables transform as irreps",
          "note": "Tensor properties vanish unless they span the totally symmetric irrep"
        },
        {
          "field_a_term": "Diffraction selection rules (allowed/forbidden reflections)",
          "field_b_term": "Group-theory extinction rules for reciprocal lattice vectors",
          "note": "Systematic absences directly encode the space group"
        },
        {
          "field_a_term": "Quasicrystal (5-fold icosahedral symmetry)",
          "field_b_term": "Aperiodic tiling (Penrose tiles, de Bruijn pentagrids)",
          "note": "Long-range order without translational periodicity; described by 6D space groups"
        },
        {
          "field_a_term": "Phase transition (symmetry breaking)",
          "field_b_term": "Group–subgroup relationship (Landau theory)",
          "note": "Continuous transition allowed only if lower-symmetry group is subgroup of higher-symmetry group"
        }
      ],
      "references": [
        {
          "note": "Schoenflies (1891) Krystallsysteme und Krystallstructur — 230 space groups"
        },
        {
          "doi": "10.1103/PhysRevLett.53.1951",
          "note": "Shechtman et al. (1984) Phys Rev Lett 53:1951 — icosahedral quasicrystal discovery"
        },
        {
          "note": "Penrose (1974) Bull Inst Math Appl 10:266 — aperiodic tilings"
        },
        {
          "note": "Burns & Glazer (2013) Space Groups for Solid State Scientists, 3rd ed. (Academic Press)"
        },
        {
          "note": "Hahn (ed.) (2006) International Tables for Crystallography Vol. A — definitive space group tables"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/materials-science-mathematics/b-crystallography-group-theory.yaml"
    },
    {
      "id": "b-piezoelectricity-symmetry-breaking",
      "title": "Piezoelectricity requires broken centrosymmetry: group-theoretic analysis of crystal point groups identifies the 20 of 32 point groups that allow the piezoelectric tensor d_{ijk} to be non-zero",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The piezoelectric tensor d_ijk relates mechanical stress σ_jk to electric polarization P_i: P_i = d_ijk · σ_jk. For d_ijk to be non-zero, the crystal must lack an inversion center (broken centrosymmetry). Group representation theory determines which tensor components are non-zero: the tensor must be",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-piezoelectricity-symmetry-breaking"
      ],
      "communication_gap": "Materials scientists and device engineers work with piezoelectric crystals using empirical d_ijk tables without connecting the non-zero entries to their group- theoretic origin. Mathematicians who study representation theory of finite groups have the tools to systematically enumerate all allowed tensor structures for all point groups but rarely publish in materials science journals.\n",
      "translation_table": [
        {
          "field_a_term": "crystal point group G (crystallographic symmetry)",
          "field_b_term": "finite group acting on tensor representation space",
          "note": "Piezoelectricity iff the trivial representation appears in d_ijk ⊗ G-action decomposition"
        },
        {
          "field_a_term": "ferroelectric phase transition at Curie temperature T_c",
          "field_b_term": "spontaneous symmetry breaking from high-symmetry to low-symmetry subgroup",
          "note": "Above T_c: cubic (Oh symmetry, centrosymmetric); below T_c: tetragonal (C_4v, polar)"
        },
        {
          "field_a_term": "piezoelectric coefficient d_33 (C/N)",
          "field_b_term": "magnitude of symmetry-allowed tensor component in z-direction",
          "note": "d_33 for PZT ~ 250-600 pC/N; zero for any material with inversion center"
        },
        {
          "field_a_term": "poling direction in a polycrystalline piezoelectric",
          "field_b_term": "external field that selects among degenerate symmetry-broken minima",
          "note": "Poling aligns ferroelectric domains, breaking macroscopic centrosymmetry artificially"
        }
      ],
      "references": [
        {
          "doi": "10.1103/RevModPhys.77.1083",
          "note": "Rabe, Ahn & Triscone (2007) Physics of Ferroelectrics — from Landau theory to first-principles. Springer"
        },
        {
          "doi": "10.1063/1.555906",
          "note": "Nye (1985) Physical Properties of Crystals. Oxford UP — standard reference for tensor properties and group theory"
        },
        {
          "doi": "10.1126/science.280.5367.1238",
          "note": "Wu & Cheong (1998) Piezoelectric materials for acoustic devices. Science 280:1238"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/materials-science-mathematics/b-piezoelectricity-symmetry-breaking.yaml"
    },
    {
      "id": "b-preisach-hysteresis-model",
      "title": "The Preisach model represents any rate-independent hysteretic material as a superposition of elementary bistable switches (hysterons), mapping hysteresis loops to a weight distribution rho(alpha,beta) that can be identified from first-order reversal curves",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A ferromagnetic material's magnetization M(H) is described by M = double_integral_{alpha>=beta} rho(alpha,beta) * gamma_{alpha,beta}[H] d_alpha d_beta, where gamma_{alpha,beta} are relay operators switching between +1 and -1 at fields alpha (up-switch) and beta (down-switch), and rho is identified f",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Materials scientists measure hysteresis loops experimentally while mathematicians study Preisach operators as functional analysis objects; the FORC technique for identifying the Preisach density is established in rock magnetism but underused in engineering materials characterization.",
      "translation_table": [
        {
          "field_a_term": "hysteresis loop M(H)",
          "field_b_term": "output of Preisach operator: superposition of relay operators",
          "note": "Any rate-independent hysteresis with congruency and wiping-out properties is representable by Preisach model"
        },
        {
          "field_a_term": "first-order reversal curve (FORC)",
          "field_b_term": "partial derivative d^2 M / d_Ha d_Hb = rho(alpha,beta)",
          "note": "FORCs directly measure the Preisach density; FORC diagram visualizes magnetic interaction and coercivity distribution"
        },
        {
          "field_a_term": "magnetic coercivity distribution",
          "field_b_term": "projection of rho(alpha,beta) onto alpha-beta diagonal",
          "note": "Coercivity field H_c = (alpha-beta)/2; interaction field H_u = (alpha+beta)/2"
        },
        {
          "field_a_term": "return-point memory in disordered magnets",
          "field_b_term": "wiping-out property of Preisach model: minor loops erase prior history",
          "note": "Preisach model predicts that state is determined only by most recent field extrema, not full history"
        }
      ],
      "references": [
        {
          "doi": "10.1007/978-3-662-04726-6",
          "note": "Mayergoyz (2003) Mathematical Models of Hysteresis - definitive Preisach model reference"
        },
        {
          "doi": "10.1029/JB091iB12p12497",
          "note": "Pike et al. (1999) J Geophys Res - FORC diagrams for characterizing fine magnetic particle systems"
        },
        {
          "doi": "10.1103/PhysRevLett.64.1973",
          "note": "Sethna et al. (1993) PRL - hysteresis, avalanches, and Barkhausen noise in disordered magnets"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/materials-science-mathematics/b-preisach-hysteresis-model.yaml"
    },
    {
      "id": "b-topological-persistence-x-materials-microstructure-failure-forecast",
      "title": "Persistent homology links microstructure topology to early failure forecasting in structural materials.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Topological persistence summaries of pore and crack networks can act as scale-robust precursors of mechanical failure, analogous to topological biomarkers in physiological signals.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-persistent-h1-betti-curves-predict-material-failure-earlier-than-stress-thresholds"
      ],
      "communication_gap": "Communities use different terminology and validation conventions, masking transferable method equivalence.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1007/s00454-002-2885-2",
          "note": "Persistent homology foundations."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/materials-science-mathematics/b-topological-persistence-x-materials-microstructure-failure-forecast.yaml"
    },
    {
      "id": "b-peridynamics-nonlocal-fracture-x-bone-microdamage-remodeling",
      "title": "Peridynamic nonlocal fracture mechanics offers a direct formalism for bone microdamage accumulation and remodeling triggers.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Peridynamic nonlocal fracture mechanics offers a direct formalism for bone microdamage accumulation and remodeling triggers.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-peridynamic-models-predict-bone-microdamage-hotspots-before-radiographic-failure"
      ],
      "communication_gap": "The two communities use different notation, benchmarks, and publication venues, which obscures transferable mathematical structure.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1007/s00466-007-0170-5",
          "note": "Peridynamic fracture modeling."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/materials-science-medicine/b-peridynamics-nonlocal-fracture-x-bone-microdamage-remodeling.yaml"
    },
    {
      "id": "b-biofilm-mechanics-viscoelastic-polymer",
      "title": "Bacterial biofilms are viscoelastic materials whose mechanical properties — creep compliance, stress relaxation, and frequency-dependent storage and loss moduli — are quantitatively described by the same polymer network models (Kelvin-Voigt, Maxwell, and power-law viscoelasticity) used for synthetic hydrogels and extracellular matrix, with the crosslinked extracellular polymeric substance (EPS) network playing the role of the polymer matrix",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Biofilm EPS forms a physically crosslinked polymer network whose linear viscoelastic response G*(omega) = G'(omega) + i*G''(omega) shows a plateau modulus G_0 ~ 10–1000 Pa at intermediate frequencies (crossover between elastic and viscous regimes at frequency omega_c = G_0/eta), identical in functio",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-biofilm-eps-crosslink-dispersal-threshold"
      ],
      "communication_gap": "Microbiologists characterize biofilm virulence and EPS composition biochemically while materials scientists measure polymer network rheology; quantitative rheological characterization of biofilms using oscillatory shear was pioneered by Stoodley's group but is not standard in clinical microbiology labs that evaluate biofilm susceptibility to treatment.",
      "translation_table": [
        {
          "field_a_term": "extracellular polymeric substance (EPS) network in biofilm (microbiology)",
          "field_b_term": "crosslinked polymer network / hydrogel matrix (materials science)",
          "note": "EPS is a physically crosslinked biopolymer network; elastic modulus, mesh size, and crosslink density are the same parameters as synthetic hydrogels"
        },
        {
          "field_a_term": "biofilm creep under constant stress (microbiology)",
          "field_b_term": "viscoelastic creep compliance J(t) = (1/G_0)(1 - e^{-t/tau}) (materials science)",
          "note": "Biofilm creep fits Kelvin-Voigt model; tau = eta/G_0 is the retardation time for stress-induced deformation"
        },
        {
          "field_a_term": "biofilm dispersion under increased flow (microbiology)",
          "field_b_term": "shear-induced network rupture / yielding at stress > sigma_y (materials science)",
          "note": "Biofilm dispersal occurs when hydrodynamic shear exceeds yield stress; sigma_y is determined by EPS crosslink density"
        },
        {
          "field_a_term": "biofilm frequency-dependent storage modulus G'(omega) (microbiology)",
          "field_b_term": "plateau modulus and rubbery plateau of polymer network (materials science)",
          "note": "G_0 plateau in biofilm oscillatory rheology is formally equivalent to the rubber plateau in polymers"
        }
      ],
      "references": [
        {
          "doi": "10.1007/s00253-001-0910-2",
          "note": "Stoodley et al. (2002) - biofilms as complex differentiated communities (viscoelastic behavior described)"
        },
        {
          "doi": "10.1039/b602702f",
          "note": "Shaw et al. (2004) - experimental observations of biofilm growth and mechanical properties using MRI (rheology)"
        },
        {
          "doi": "10.1128/AEM.00867-12",
          "note": "Lieleg et al. (2011) - charge-selective transport of proteins through bacterial biofilms"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/materials-science-microbiology/b-biofilm-mechanics-viscoelastic-polymer.yaml"
    },
    {
      "id": "b-classical-nucleation-theory",
      "title": "Classical nucleation theory predicts the rate of crystal formation from supersaturated solutions as J = A * exp(-Delta-G*/kT), where the nucleation barrier Delta-G* = 16*pi*gamma^3 / (3*Delta-g_v^2) balances surface energy against volumetric driving force",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Crystal nucleation rate from a supersaturated melt is J = Z * f * C0 * exp(-Delta-G*/kT), where the thermodynamic barrier Delta-G* = 16*pi*gamma^3/(3*Delta-g_v^2) is derived from competing surface free energy (gamma, favors dissolution) and volumetric free energy gain (Delta-g_v, favors growth), pre",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Materials scientists measure nucleation rates experimentally while thermodynamicists derive CNT from capillarity approximations; CNT often fails by orders of magnitude for small clusters where the capillarity approximation breaks down, and the gap between theory and experiment is acknowledged but not resolved.",
      "translation_table": [
        {
          "field_a_term": "critical nucleus radius r* = 2*gamma / Delta-g_v",
          "field_b_term": "thermodynamic saddle point in free energy landscape",
          "note": "r* is the smallest stable nucleus; subcritical clusters dissolve, supercritical clusters grow spontaneously"
        },
        {
          "field_a_term": "nucleation barrier Delta-G* = (4/3)*pi*r*^2 * gamma",
          "field_b_term": "activation energy in Arrhenius nucleation rate",
          "note": "Delta-G* scales as gamma^3/Delta-g_v^2; small changes in interfacial energy gamma have cubic effect on barrier"
        },
        {
          "field_a_term": "Zeldovich factor Z",
          "field_b_term": "accounts for curvature of free energy near saddle point",
          "note": "Z ~ 0.01-0.1 for typical systems; often treated as a prefactor adjustment"
        },
        {
          "field_a_term": "supersaturation ratio S = c/c_eq",
          "field_b_term": "Delta-g_v = kT * ln(S) per molecule",
          "note": "Higher supersaturation increases Delta-g_v, lowering r* and Delta-G*; nucleation rate increases exponentially"
        }
      ],
      "references": [
        {
          "doi": "10.1039/cs9908900321",
          "note": "Mullin (2001) Crystallization - comprehensive treatment of classical nucleation theory"
        },
        {
          "doi": "10.1103/RevModPhys.84.759",
          "note": "Sosso et al. (2016) - crystal nucleation in liquids: open questions and future challenges"
        },
        {
          "doi": "10.1126/science.1167641",
          "note": "Vekilov (2010) - two-step mechanism for the nucleation of crystals from solution"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/materials-science-physics/b-classical-nucleation-theory.yaml"
    },
    {
      "id": "b-fracture-griffith-statistical",
      "title": "The Griffith fracture criterion (K_I = K_Ic at the crack tip) is the deterministic limit of a statistical-physics crack nucleation problem: the disorder-averaged fracture strength of heterogeneous materials follows a Weibull extreme-value distribution, and the brittle-to-ductile transition maps onto a depinning phase transition in the random-field Ising model universality class.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Griffith (1921) showed that fracture occurs when the elastic strain energy released by crack propagation (G = K²/E') equals the surface energy cost (2γ): K_Ic = √(2Eγ/π). This deterministic criterion applies to a perfect crystal. Real materials contain quenched disorder (voids, inclusions, grain bou",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-fracture-depinning-crackling-noise-exponent"
      ],
      "communication_gap": "Fracture mechanics engineers use stress intensity factors (K) and Weibull statistics empirically without connection to the universality class language of statistical physics. The depinning/crackling-noise framework (Zapperi, Alava) is published in Physical Review Letters / Physical Review E but not in engineering fracture journals (Engineering Fracture Mechanics, J. Mechanics Physics Solids).\n",
      "translation_table": [
        {
          "field_a_term": "Griffith critical stress intensity K_Ic",
          "field_b_term": "Depinning threshold force in a disordered medium",
          "note": "Crack advances when local K_I exceeds K_Ic; equivalent to a driven interface overcoming pinning"
        },
        {
          "field_a_term": "Material disorder (voids, inclusions)",
          "field_b_term": "Random pinning field in elastic manifold models",
          "note": "Quenched disorder sets the pinning landscape that crack front must traverse"
        },
        {
          "field_a_term": "Weibull modulus m (width of strength distribution)",
          "field_b_term": "Disorder strength in random-field Ising model",
          "note": "High m → low disorder → deterministic Griffith limit; low m → broad statistical failure distribution"
        },
        {
          "field_a_term": "Acoustic emission avalanches during fracture",
          "field_b_term": "Crackling noise / Barkhausen noise in depinning transitions",
          "note": "Power-law size distribution of AE events = crackling noise universality class"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rsta.1921.0006",
          "note": "Griffith (1921) – the phenomena of rupture and flow in solids; foundational fracture criterion"
        },
        {
          "doi": "10.1103/PhysRevLett.78.1408",
          "note": "Zapperi et al. (1997) PRL – crack propagation as a depinning transition; crackling noise"
        },
        {
          "doi": "10.1126/science.288.5469.1275",
          "note": "Sethna et al. (2001) Science – crackling noise: universal power laws in fracture, magnets, earthquakes"
        },
        {
          "doi": "10.1103/PhysRevE.62.6164",
          "note": "Alava et al. – fiber bundle model: brittle fracture and Weibull statistics"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/materials-science-physics/b-fracture-griffith-statistical.yaml"
    },
    {
      "id": "b-hydrogel-polymer-network-mechanics",
      "title": "Hydrogel mechanical properties are quantitatively predicted by rubber elasticity and Flory-Rehner theory, where the elastic modulus G = n*k*T (n = effective crosslink density) and swelling equilibrium balances elastic energy against polymer-solvent mixing free energy",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The equilibrium swelling ratio Q and shear modulus G of a crosslinked hydrogel are jointly determined by the Flory-Rehner equations: G = n*k*T*Q^{1/3} (rubber elasticity) and mu_solvent = RT[ln(1-v2) + v2 + chi*v2^2 + v_e*(v2^{1/3}/2 - v2)] = 0, where n is crosslink density, chi is the Flory-Huggins",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Materials scientists characterize hydrogel mechanics by measuring modulus and swelling while polymer physicists derive thermodynamic theory; the quantitative connection between molecular parameters (chi, crosslink density) and macroscopic mechanical properties (G, fracture toughness) is known in polymer theory but not systematically applied in biomaterial hydrogel design.",
      "translation_table": [
        {
          "field_a_term": "hydrogel swelling ratio Q = V_swollen / V_dry",
          "field_b_term": "polymer volume fraction v2 = 1/Q at equilibrium",
          "note": "Q is controlled by crosslink density and chi parameter; Q ~ 10-100 for typical hydrogels"
        },
        {
          "field_a_term": "elastic shear modulus G of swollen hydrogel",
          "field_b_term": "G = n*k*T*Q^{-1/3} from affine network model",
          "note": "G decreases with swelling; n is strand density between crosslinks in dry state"
        },
        {
          "field_a_term": "fracture toughness of hydrogel",
          "field_b_term": "lake-thomas tearing energy Gc = n * U * l0 * sqrt(n)",
          "note": "Fracture requires breaking all chains crossing the crack plane; Gc scales as sqrt(strand length)"
        },
        {
          "field_a_term": "Flory-Huggins chi parameter",
          "field_b_term": "polymer-solvent interaction free energy per monomer",
          "note": "chi < 0.5 gives good solvent (swelling); chi > 0.5 gives poor solvent (collapse); chi is temperature-dependent"
        }
      ],
      "references": [
        {
          "doi": "10.1002/app.1953.070070308",
          "note": "Flory & Rehner (1953) J Appl Phys - thermodynamic theory of rubber elasticity and swelling"
        },
        {
          "doi": "10.1126/science.1241214",
          "note": "Gong et al. (2010) Science - double network hydrogels with extraordinary toughness"
        },
        {
          "doi": "10.1039/c4sm00269e",
          "note": "Creton (2017) Macromolecules - tough hydrogels: review of physical and chemical approaches"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/materials-science-physics/b-hydrogel-polymer-network-mechanics.yaml"
    },
    {
      "id": "b-phonon-boltzmann-thermal-transport",
      "title": "Thermal conductivity of crystalline solids is quantitatively predicted by the phonon Boltzmann transport equation (BTE): κ = (1/3)∫C(ω)v(ω)λ(ω)dω, where acoustic phonons are the heat carriers and three-phonon Umklapp scattering is the primary resistive process, directly connecting lattice dynamics to macroscopic heat flow.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Phonons—quantised lattice vibrations—carry heat in insulators and semiconductors exactly as molecules carry heat in gases. The phonon BTE (Peierls 1929) describes their out-of-equilibrium distribution under a temperature gradient: ∂n_λ/∂t + v_λ·∇T(∂n_λ/∂T) = (∂n_λ/∂t)|_scatt. Thermal conductivity κ ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-phonon-mfp-spectrum-thermal-conductivity-engineering"
      ],
      "communication_gap": "Kinetic theorists and materials scientists use different notations (kinetic theory uses collision integrals; solid-state physics uses second quantisation). The equivalence was established by Peierls (1929) and Callaway (1959) but textbook treatments of each field rarely cross-reference the other.\n",
      "translation_table": [
        {
          "field_a_term": "Phonon mode λ = (q, s) (wavevector, branch)",
          "field_b_term": "Gas molecule in kinetic theory",
          "note": "Both obey Bose-Einstein (phonons) / Maxwell-Boltzmann (classical) equilibrium distributions"
        },
        {
          "field_a_term": "Phonon group velocity v_λ = ∂ω/∂q",
          "field_b_term": "Molecular velocity in kinetic theory",
          "note": "Determines the diffusion rate of heat; dispersive bands give wavelength-dependent velocities"
        },
        {
          "field_a_term": "Phonon-phonon Umklapp scattering (three-phonon)",
          "field_b_term": "Molecular collision in kinetic theory (momentum-non-conserving)",
          "note": "Umklapp restores crystal momentum with a reciprocal lattice vector G ≠ 0, providing thermal resistance"
        },
        {
          "field_a_term": "Phonon mean free path λ = v_λ τ_λ",
          "field_b_term": "Mean free path between molecular collisions",
          "note": "Both set the length scale over which heat diffuses; nano-structuring below λ reduces κ"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.99.185901",
          "note": "Broido et al. (2007) PRL – first-principles phonon BTE gives κ for Si and Ge within 5% of experiment"
        },
        {
          "doi": "10.1103/PhysRev.113.1046",
          "note": "Callaway (1959) PR – model thermal conductivity from phonon relaxation times"
        },
        {
          "doi": "10.1016/j.physrep.2018.02.004",
          "note": "McGaughey et al. (2019) Physics Reports – phonon transport in nanostructured materials"
        },
        {
          "doi": "10.1038/s41578-018-0018-2",
          "note": "Snyder & Toberer – complex thermoelectric materials; phonon engineering"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/materials-science-physics/b-phonon-boltzmann-thermal-transport.yaml"
    },
    {
      "id": "b-phonons-thermal-conductivity",
      "title": "Phonons and thermal conductivity — quantized lattice vibrations are the primary heat carriers in non-metallic solids and govern thermoelectric efficiency and CPU thermal management",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Phonons (quanta of lattice vibration, analogous to photons as quanta of light) are the dominant heat carriers in non-metallic solids. Thermal conductivity κ = (1/3)Cvl where C is volumetric heat capacity, v is mean phonon group velocity, and l is phonon mean free path. The phonon dispersion relation",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-phonon-glass-electron-crystal-zt-optimization"
      ],
      "communication_gap": "Phonon physics is taught in solid state physics courses but rarely connected explicitly to thermal engineering education, which often uses empirical κ values without the underlying physical model. The thermoelectric materials community (materials science / chemistry) and the phonon physics community (condensed matter physics) overlap but use different language. The CPU cooling community largely treats thermal conductivity as a bulk material property without engineering the phonon spectrum.\n",
      "translation_table": [
        {
          "field_a_term": "phonon dispersion ω(k)",
          "field_b_term": "band structure of thermal transport (analogous to electron bands)",
          "note": "Acoustic branches carry most heat; optical branches carry less due to low group velocity"
        },
        {
          "field_a_term": "Normal (N) process scattering (momentum conserved)",
          "field_b_term": "elastic scattering (momentum conserving collisions)",
          "note": "N-processes alone do not limit κ; they are thermal but not resistive"
        },
        {
          "field_a_term": "Umklapp (U) process (momentum transferred to lattice)",
          "field_b_term": "resistive phonon-phonon scattering (thermal resistance)",
          "note": "U-processes give κ ∝ exp(θ_D/aT) at low T, ∝ T⁻¹ at high T"
        },
        {
          "field_a_term": "mean free path l (average phonon propagation distance)",
          "field_b_term": "electron mean free path in electrical conductivity",
          "note": "Both determined by scattering rate; engineering l via nanostructuring is the thermoelectric design strategy"
        },
        {
          "field_a_term": "phonon-electron coupling constant λ",
          "field_b_term": "BCS superconducting pairing strength",
          "note": "Strong phonon-electron coupling → high T_c; same constant determines electrical resistivity"
        },
        {
          "field_a_term": "phononic crystal (periodic nanostructure)",
          "field_b_term": "photonic crystal (periodic optical structure)",
          "note": "Analogous gap engineering: phononic bandgap suppresses thermal conductivity modes"
        }
      ],
      "references": [
        {
          "note": "Debye (1912) Ann Phys 344:789 — phonon model and T³ heat capacity"
        },
        {
          "note": "Peierls (1929) Ann Phys 395:1055 — Umklapp scattering and thermal resistance theory"
        },
        {
          "doi": "10.1063/1.1524305",
          "note": "Cahill et al. (2003) J Appl Phys 93:793 — nanoscale thermal transport review"
        },
        {
          "doi": "10.1038/nmat2090",
          "note": "Snyder & Toberer (2008) Nat Mater 7:105 — complex thermoelectric materials and phonon engineering"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/materials-science-physics/b-phonons-thermal-conductivity.yaml"
    },
    {
      "id": "b-bcs-superconductivity",
      "title": "BCS theory explains conventional superconductivity via phonon-mediated Cooper pairing — but high-Tc cuprates and iron-based superconductors violate BCS assumptions, and the pairing mechanism remains unknown.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The BCS theory (Bardeen, Cooper, Schrieffer 1957) bridges quantum mechanics and materials science to explain conventional superconductivity: phonon-mediated (lattice vibration-mediated) effective electron-electron attraction overcomes Coulomb repulsion to form Cooper pairs — bound states of two elec",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-spin-fluctuation-pairing-cuprates"
      ],
      "communication_gap": "BCS theory requires quantum field theory fluency; materials synthesis of cuprates requires different expertise in solid-state chemistry. The theoretical and experimental communities overlap but insufficiently. High-Tc superconductivity research has become highly specialized, with few researchers spanning ab initio calculations, strongly correlated electron theory, and materials synthesis.\n",
      "translation_table": [
        {
          "field_a_term": "phonon (lattice vibration quantum)",
          "field_b_term": "pairing boson (mediator of effective e-e attraction)",
          "note": "In BCS, phonons mediate; in high-Tc, the mediator is unknown"
        },
        {
          "field_a_term": "Cooper pair (k↑, −k↓ bound state)",
          "field_b_term": "bosonic condensate (BCS-BEC crossover)",
          "note": "Cooper pairs condense into superconducting state via BEC-like mechanism"
        },
        {
          "field_a_term": "energy gap Δ (BCS)",
          "field_b_term": "d-wave gap Δ_k = Δ₀(cos k_x − cos k_y) (cuprates)",
          "note": "Gap symmetry distinguishes s-wave (BCS) from d-wave (cuprate) pairing"
        },
        {
          "field_a_term": "Fermi surface instability (Cooper instability)",
          "field_b_term": "Mott insulator to superconductor transition (doping-driven)",
          "note": "High-Tc superconductors emerge from Mott insulating parent compounds"
        },
        {
          "field_a_term": "isotope effect T_c ∝ M^{-1/2} (phonon signature)",
          "field_b_term": "anomalous isotope effect in cuprates (partial, field-dependent)",
          "note": "Partial isotope effect in cuprates suggests phonons play a secondary role"
        }
      ],
      "references": [
        {
          "note": "Bardeen, Cooper & Schrieffer (1957) Theory of superconductivity. Phys Rev 108:1175-1204"
        },
        {
          "note": "Cooper (1956) Bound electron pairs in a degenerate Fermi gas. Phys Rev 104:1189-1190"
        },
        {
          "note": "Bednorz & Müller (1986) Possible high Tc superconductivity in the Ba-La-Cu-O system. Z Phys B 64:189-193"
        },
        {
          "note": "Anderson (1987) The resonating valence bond state in La₂CuO₄ and superconductivity. Science 235:1196-1198"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/materials-science-quantum-physics/b-bcs-superconductivity.yaml"
    },
    {
      "id": "b-carbon-nanotube-graphene-band-structure-zone-folding",
      "title": "Carbon nanotube electronic properties — metallic or semiconducting, with chirality- dependent band gaps — are derived from graphene band structure by zone-folding: wrapping the 2-D graphene Brillouin zone onto the 1-D nanotube cylinder.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A single-walled nanotube (SWNT) of chiral vector (n,m) is a rolled-up graphene sheet. Zone-folding quantizes the transverse wavevector: k_⊥ = 2πq/C (q integer, C = |Ch| circumference). The 1-D band structure is the intersection of these quantization lines with graphene's 2-D band structure. If a lin",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-zone-folding-curvature-corrections-nanotube-gap"
      ],
      "communication_gap": "Materials scientists who grow and characterize nanotubes and quantum physicists who derive band structure from first principles share zone-folding but use different languages; zone-folding limitations (curvature effects, σ-π rehybridization) are known theoretically but rarely discussed in synthesis or device literatures.\n",
      "translation_table": [
        {
          "field_a_term": "nanotube chiral vector (n,m) (materials science)",
          "field_b_term": "quantization condition on graphene Brillouin zone (quantum physics)",
          "note": "Chiral vector defines circumference → quantization lines through the BZ"
        },
        {
          "field_a_term": "metallic vs. semiconducting nanotube (materials science)",
          "field_b_term": "K-point crossing condition (n−m ≡ 0 mod 3) (quantum physics)",
          "note": "Topological criterion — K-point inclusion is determined by mod-3 arithmetic"
        },
        {
          "field_a_term": "nanotube band gap Eg (materials science)",
          "field_b_term": "Dirac cone linear dispersion sampled off K-point (quantum physics)",
          "note": "Eg ≈ 2γa/d_t (γ = hopping integral, d_t = diameter) — graphene cone geometry"
        },
        {
          "field_a_term": "armchair nanotube (n,n) — always metallic (materials science)",
          "field_b_term": "quantization line through both K and K' Dirac points (quantum physics)",
          "note": "Armchair quantization lines pass exactly through Dirac points regardless of n"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.78.1932",
          "note": "Dresselhaus et al. (1998) — physical properties of carbon nanotubes"
        },
        {
          "doi": "10.1103/PhysRevB.57.R4145",
          "note": "Saito et al. (1992) — electronic structure of carbon nanotubes"
        },
        {
          "doi": "10.1038/35089553",
          "note": "Dekker (1999) — carbon nanotubes as molecular quantum wires"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/materials-science-quantum-physics/b-carbon-nanotube-graphene-band-structure-zone-folding.yaml"
    },
    {
      "id": "b-josephson-junction-macroscopic-quantum-tunneling",
      "title": "The Josephson junction provides the cleanest experimental demonstration of macroscopic quantum tunneling: the phase difference across the junction is a quantum variable describing a collective degree of freedom of billions of Cooper pairs, and its tunneling through a classical energy barrier directly tests whether quantum mechanics applies to macroscopic objects.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Josephson (1962) predicted that Cooper pairs would tunnel coherently through a thin insulating barrier, producing a supercurrent with no voltage. This Josephson effect makes the phase difference phi across the junction a quantum degree of freedom described by the Schrodinger equation for a particle ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "",
      "translation_table": [
        {
          "field_a_term": "junction phase phi (quantum variable)",
          "field_b_term": "macroscopic quantum degree of freedom (order parameter phase)",
          "note": "phi describes coherent state of billions of Cooper pairs — a truly macroscopic quantum variable"
        },
        {
          "field_a_term": "tilted washboard potential U(phi) = -E_J cos(phi) - (hbar I / 2e) phi",
          "field_b_term": "potential energy landscape governing phase dynamics",
          "note": "Classically, phase slides (voltage); quantum mechanically, it can tunnel"
        },
        {
          "field_a_term": "plasma frequency omega_p = sqrt(2eI_c / hbar C)",
          "field_b_term": "small oscillation frequency in the potential well (sets tunneling rate)",
          "note": "Quantum zero-point fluctuations at omega_p are what drive MQT"
        },
        {
          "field_a_term": "Caldeira-Leggett dissipation",
          "field_b_term": "quasiparticle environment causing decoherence",
          "note": "The environmental coupling that limits MQT coherence and qubit lifetime T1"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0031-9163(62)91369-0",
          "note": "Josephson (1962) — possible new effects in superconductive tunnelling"
        },
        {
          "doi": "10.1103/PhysRevLett.55.1543",
          "note": "Devoret et al. (1985) — measurements of macroscopic quantum tunneling out of the zero-voltage state"
        },
        {
          "doi": "10.1016/0378-4371(83)90002-7",
          "note": "Caldeira & Leggett (1983) — quantum tunnelling in a dissipative system"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/materials-science-quantum-physics/b-josephson-junction-macroscopic-quantum-tunneling.yaml"
    },
    {
      "id": "b-magnons-spin-wave-collective-excitations",
      "title": "Magnons (spin waves) are the Goldstone bosons of spontaneously broken spin-rotation symmetry in ferromagnets: their dispersion ω∝k² (ferromagnets) or ω∝k (antiferromagnets) follows from the same quantum field theory as phonons",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "In a ferromagnet below the Curie temperature, continuous spin-rotation symmetry is spontaneously broken. Goldstone's theorem guarantees massless (gapless) bosonic excitations: spin waves, quantized as magnons with energy ℏω(k). In the long-wavelength limit, spin-wave theory (Holstein-Primakoff trans",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-magnons-spin-wave-collective-excitations"
      ],
      "communication_gap": "Condensed matter physicists who work on magnons use spin-wave theory and Holstein-Primakoff bosons but the connection to Goldstone's theorem and quantum field theory of spontaneous symmetry breaking is often presented separately in high-energy physics curricula. The mathematical unity of phonons, magnons, and pions as Goldstone bosons is powerful but not standard in either condensed matter or particle physics textbooks.\n",
      "translation_table": [
        {
          "field_a_term": "magnon (quantum of spin wave) in a ferromagnet",
          "field_b_term": "Goldstone boson of broken continuous spin-rotation symmetry",
          "note": "Goldstone theorem: one massless boson per broken generator of the symmetry group"
        },
        {
          "field_a_term": "spin stiffness D (meV·Å²)",
          "field_b_term": "slope of quadratic dispersion ω=Dk² analogous to phonon speed of sound",
          "note": "D measured by neutron scattering; determines magnon mean free path and thermal conductance"
        },
        {
          "field_a_term": "Holstein-Primakoff transformation S⁺→√(2S-a†a)·a",
          "field_b_term": "bosonization of spin operators — maps spin Hamiltonian to harmonic oscillator",
          "note": "Leading order gives free magnon gas; higher order gives magnon-magnon interactions"
        },
        {
          "field_a_term": "magnon heat capacity C ∝ T^(3/2) (ferromagnet)",
          "field_b_term": "Bose-Einstein integral over quadratic magnon density of states",
          "note": "Same integral as phonon Debye T³ law but with different power due to ω∝k² dispersion"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRev.58.1098",
          "note": "Holstein & Primakoff (1940) Field dependence of the intrinsic domain magnetization of a ferromagnet. Phys Rev 58:1098"
        },
        {
          "doi": "10.1103/PhysRev.124.246",
          "note": "Goldstone (1961) Field theories with superconductor solutions. Nuovo Cimento 19:154"
        },
        {
          "doi": "10.1038/nphys3347",
          "note": "Chumak et al. (2015) Magnon spintronics. Nat Phys 11:453"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/materials-science-quantum-physics/b-magnons-spin-wave-collective-excitations.yaml"
    },
    {
      "id": "b-quantum-dots-particle-in-a-box",
      "title": "Semiconductor quantum dots are physical realizations of the quantum-mechanical particle-in-a-box: three-dimensional carrier confinement in a nanometer-scale crystal shifts energy levels according to E_n = h^2 n^2 / (8 m* L^2), making emission wavelength continuously tunable by dot size through the same quantum confinement that transforms a bulk semiconductor band gap into discrete atomic-like levels",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "In a quantum dot of diameter d, the kinetic energy of an electron (hole) confined to a sphere of radius r = d/2 is quantized as delta_E = h^2/(8 m* r^2) (Brus equation); this confinement energy adds to the bulk band gap to blue-shift absorption and emission in proportion to 1/r^2, directly mapping t",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-quantum-dot-emission-confinement-scaling"
      ],
      "communication_gap": "Materials chemists synthesizing colloidal quantum dots focus on ligand chemistry and size dispersity control while quantum physicists study confinement effects theoretically; the practical Brus equation is widely used but surface-state and strain corrections require tight-binding or DFT methods not routinely applied in synthesis labs.",
      "translation_table": [
        {
          "field_a_term": "quantum dot radius r (materials science)",
          "field_b_term": "box length L in particle-in-a-box (quantum physics)",
          "note": "Confinement energy scales as h^2/(8 m* r^2) = E_1 for a 3D spherical well, analogous to 1D box"
        },
        {
          "field_a_term": "effective mass m* of electron/hole in semiconductor (materials science)",
          "field_b_term": "particle mass m in quantum mechanics (quantum physics)",
          "note": "Band effective mass replaces free electron mass; lighter m* gives larger confinement shift at same size"
        },
        {
          "field_a_term": "exciton Bohr radius a_B (materials science)",
          "field_b_term": "characteristic length scale of quantum confinement (quantum physics)",
          "note": "Strong confinement regime when r << a_B; in this limit the Brus equation is most accurate"
        },
        {
          "field_a_term": "size-tunable photoluminescence peak (materials science)",
          "field_b_term": "quantized energy level spacing E_n - E_{n-1} (quantum physics)",
          "note": "PL emission peak directly reports the lowest confinement energy gap, analogous to 1->0 ground-state transition"
        }
      ],
      "references": [
        {
          "doi": "10.1021/ja00355a005",
          "note": "Brus (1984) - electron-electron and electron-hole interactions in small semiconductor crystallites: Brus equation derivation"
        },
        {
          "doi": "10.1126/science.271.5251.933",
          "note": "Alivisatos (1996) - semiconductor clusters, nanocrystals, and quantum dots (review)"
        },
        {
          "doi": "10.1021/nn1000593",
          "note": "Efros & Brus (2021) - nanocrystal quantum dots: from discovery to modern development (retrospective)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/materials-science-quantum-physics/b-quantum-dots-particle-in-a-box.yaml"
    },
    {
      "id": "b-alloy-strengthening-dislocation-theory",
      "title": "Alloy mechanical strength is governed by dislocation theory: the Taylor relation sigma_y = M*alpha*G*b*sqrt(rho) bridges materials science and solid mechanics by quantifying how dislocation density rho controls yield stress through line tension and Peierls barrier physics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The yield strength of metallic alloys is determined by the density and mobility of dislocations (line defects in the crystal lattice): the Taylor hardening relation sigma_y = M*alpha*G*b*sqrt(rho) relates yield stress to dislocation density rho, shear modulus G, Burgers vector magnitude b, and orien",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-high-entropy-alloy-dislocation-cocktail-hardening"
      ],
      "communication_gap": "Materials scientists characterize alloy microstructure experimentally while solid mechanics researchers develop continuum plasticity models; the dislocation theory bridge is mature at the research level but routine alloy design still uses empirical composition-property relations rather than physics-based dislocation models, limiting the pace of new alloy development.\n",
      "translation_table": [
        {
          "field_a_term": "yield strength sigma_y (materials science)",
          "field_b_term": "critical resolved shear stress tau_c (solid mechanics)",
          "note": "Taylor factor M converts between single-crystal CRSS and polycrystal yield stress"
        },
        {
          "field_a_term": "dislocation density rho (materials science)",
          "field_b_term": "internal stress field from forest dislocations (solid mechanics)",
          "note": "Forest dislocations create a long-range stress field that obstructs glide dislocations"
        },
        {
          "field_a_term": "precipitate shearing vs. Orowan looping (materials science)",
          "field_b_term": "obstacle bypass mechanisms for dislocation glide (solid mechanics)",
          "note": "The transition from shearing to looping determines maximum precipitation hardening"
        },
        {
          "field_a_term": "Hall-Petch grain boundary strengthening (materials science)",
          "field_b_term": "dislocation pile-up stress concentration at grain boundaries (solid mechanics)",
          "note": "sigma_y ~ k/sqrt(d) arises from stress needed to propagate slip across a pile-up"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rspa.1934.0106",
          "note": "Taylor (1934) - mechanism of plastic deformation of crystals; original dislocation concept"
        },
        {
          "doi": "10.1016/0001-6160(959)90171-9",
          "note": "Orowan (1959) - causes and effects of internal stresses in metals"
        },
        {
          "doi": "10.1038/s41524-020-0289-z",
          "note": "Varvenne et al. (2020) - theory of strengthening in high-entropy alloys"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/materials-science-solid-mechanics/b-alloy-strengthening-dislocation-theory.yaml"
    },
    {
      "id": "b-auxetic-materials-negative-poisson-ratio",
      "title": "Auxetic materials exhibit a negative Poisson's ratio (ν < 0) because their re-entrant or chiral microgeometries cause lateral expansion under axial tension, a counterintuitive behavior predicted by continuum elasticity theory and enabling programmable mechanical metamaterial design\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "In conventional materials ν > 0 (lateral contraction under axial tension), but auxetic materials with re-entrant honeycomb, rotating rigid unit, or chiral lattice microstructures exhibit ν as low as -1, because the geometric kinematics of their unit cells force lateral expansion under extension; lin",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-reentrant-geometry-auxetic-impact-resistance"
      ],
      "communication_gap": "Materials scientists characterize auxetic behavior experimentally while continuum mechanicists develop elasticity theory; the design of auxetic metamaterials from first principles (prescribing a target ν then computing the required microgeometry) is not yet systematized in either community.\n",
      "translation_table": [
        {
          "field_a_term": "re-entrant honeycomb microgeometry (materials science)",
          "field_b_term": "unit cell kinematics producing negative lateral strain under axial loading (mechanics)",
          "note": "Re-entrant angle geometry forces cell walls to unfold laterally when stretched axially"
        },
        {
          "field_a_term": "negative Poisson's ratio ν < 0 (materials science)",
          "field_b_term": "coupling coefficient between axial and transverse strain components in compliance tensor (mechanics)",
          "note": "ν = -ε_transverse / ε_axial; negative value is allowed by thermodynamic stability conditions: -1 ≤ ν ≤ 0.5"
        },
        {
          "field_a_term": "auxetic foam / fiber composite (materials science)",
          "field_b_term": "engineered compliance tensor with off-diagonal coupling (mechanics)",
          "note": "Auxetic composites are designed by tailoring fiber orientation and matrix properties to achieve target ν"
        },
        {
          "field_a_term": "energy absorption enhancement in auxetic materials (materials science)",
          "field_b_term": "increased indentation resistance due to material densification under load (mechanics)",
          "note": "Negative ν causes material to flow toward indenter under impact, increasing resistance"
        }
      ],
      "references": [
        {
          "doi": "10.1038/357475a0",
          "note": "Lakes (1987) - foam structures with a negative Poisson's ratio (original auxetic paper)"
        },
        {
          "doi": "10.1002/adma.201801103",
          "note": "Ren et al. (2018) - auxetic metamaterials and structures: a review"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/materials-science-solid-mechanics/b-auxetic-materials-negative-poisson-ratio.yaml"
    },
    {
      "id": "b-dendrite-growth-diffusion-limited-aggregation",
      "title": "Dendritic crystal growth is governed by the same diffusion-limited aggregation mathematics that generates fractal clusters in statistical physics, with the Mullins-Sekerka instability controlling tip-splitting and branch morphology.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Solidification dendrites grow by the same rule as DLA (diffusion-limited aggregation): the local growth rate is proportional to the gradient of a Laplacian field (heat or solute diffusion), so the interface is unstable to perturbations (Mullins-Sekerka instability) and develops fractal, branching ar",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-phase-field-universality-solidification-DLA"
      ],
      "communication_gap": "Materials scientists studying solidification and statistical physicists studying DLA use identical mathematics but publish in separate journals (Acta Materialia vs. Physical Review Letters); the phase-field community bridges them, but many solidification engineers remain unaware of the fractal-geometry literature.\n",
      "translation_table": [
        {
          "field_a_term": "dendrite tip radius and growth velocity (materials science)",
          "field_b_term": "DLA cluster tip dynamics (statistical physics)",
          "note": "Both obey the same Laplacian growth law; tip radius scales with diffusion length"
        },
        {
          "field_a_term": "Mullins-Sekerka morphological instability (materials science)",
          "field_b_term": "tip-splitting in DLA (statistical physics)",
          "note": "Linear stability analysis of a planar front maps onto the DLA branching criterion"
        },
        {
          "field_a_term": "anisotropic surface energy / crystallographic symmetry (materials science)",
          "field_b_term": "anisotropy-controlled fractal dimension (statistical physics)",
          "note": "Cubic surface energy anisotropy selects four-fold dendritic symmetry vs. isotropic DLA"
        },
        {
          "field_a_term": "solute or thermal diffusion field (materials science)",
          "field_b_term": "harmonic potential / random-walk probability field (statistical physics)",
          "note": "Both fields satisfy ∇²u = 0 at quasi-steady state; growth velocity ∝ ∇u·n̂"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.52.1433",
          "note": "Witten & Sander (1981) — original DLA model showing Laplacian growth mechanism"
        },
        {
          "doi": "10.1007/BF00900659",
          "note": "Mullins & Sekerka (1964) — morphological instability of a solidifying interface"
        },
        {
          "doi": "10.1103/RevModPhys.74.991",
          "note": "Karma & Rappel (2002) — phase-field model unifying dendrite growth and DLA"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/materials-science-statistical-physics/b-dendrite-growth-diffusion-limited-aggregation.yaml"
    },
    {
      "id": "b-fisher-information-design-x-autonomous-materials-experiments",
      "title": "Fisher-information design connects statistical efficiency bounds to autonomous materials-experiment scheduling.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Autonomous labs choose the next experiment under budget constraints; Fisher-information criteria convert that choice into a measurable precision objective and make exploration policies auditable.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-information-optimal-batching-accelerates-material-discovery"
      ],
      "communication_gap": "Adjacent communities use different software stacks and validation norms, so mathematically equivalent tools are often rediscovered in parallel.",
      "translation_table": [
        {
          "field_a_term": "Design matrix sensitivity",
          "field_b_term": "Instrument setting perturbation response",
          "note": "Determines expected parameter precision gain."
        },
        {
          "field_a_term": "Cramer-Rao bound",
          "field_b_term": "Best-case composition estimate variance",
          "note": "Lower bound for feasible uncertainty."
        },
        {
          "field_a_term": "D-optimal objective",
          "field_b_term": "Batch experiment utility score",
          "note": "Supports compute-constrained ranking."
        }
      ],
      "references": [
        {
          "doi": "10.1098/rsta.1922.0009",
          "note": "Fisher (1922) estimation and information."
        },
        {
          "doi": "10.1017/S0962492910000061",
          "note": "Stuart (2010) Bayesian inverse-problem foundations."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/materials-science-statistics/b-fisher-information-design-x-autonomous-materials-experiments.yaml"
    },
    {
      "id": "b-semiconductor-doping-fermi-level-chemical-potential",
      "title": "Semiconductor doping is a chemical potential engineering problem: the Fermi level is the electrochemical potential of electrons, and donor/acceptor impurities shift it by changing the electron chemical potential exactly as pH is shifted by acid/base additions, unifying solid-state physics, thermodynamics, and electrochemistry through the single concept of electron chemical potential.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "In thermodynamic equilibrium, the Fermi level E_F is the chemical potential of electrons: E_F = dG/dN|_{T,P,N_other}. Donor impurities donate electrons to the conduction band, raising E_F toward the conduction band minimum (n-type); acceptor impurities accept electrons from the valence band, lowerin",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Semiconductor physicists learn band diagrams without connecting them explicitly to thermodynamic potential theory; thermodynamicists rarely study semiconductor devices. The Fermi level as chemical potential is stated but not developed in most solid-state physics textbooks, disconnecting semiconductor engineering from physical chemistry and electrochemistry.\n",
      "translation_table": [
        {
          "field_a_term": "chemical potential mu_e = E_F (thermodynamics)",
          "field_b_term": "Fermi level in semiconductor (materials science)",
          "note": "E_F is literally the chemical potential of electrons; both obey dG/dN = mu"
        },
        {
          "field_a_term": "acid-base equilibrium shifting pH (thermodynamics)",
          "field_b_term": "donor/acceptor doping shifting Fermi level (materials science)",
          "note": "Donor adds electrons (raises E_F), like acid adds protons (lowers pH) - exact chemical potential analogy"
        },
        {
          "field_a_term": "concentration cell EMF = (kT/e) ln(c_1/c_2) (thermodynamics)",
          "field_b_term": "p-n junction built-in potential V_bi (materials science)",
          "note": "V_bi = (kT/e) ln(N_D N_A / n_i^2) is the Nernst equation for electron chemical potential difference"
        },
        {
          "field_a_term": "Gibbs free energy minimum at equilibrium (thermodynamics)",
          "field_b_term": "Fermi level equalisation across interfaces at thermal equilibrium (materials science)",
          "note": "Flat Fermi level in equilibrium is the semiconductor expression of dG = 0"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevB.37.2934",
          "note": "Van de Walle & Martin (1987) - theoretical study of band offsets at semiconductor interfaces"
        },
        {
          "doi": "10.1103/PhysRevLett.84.1812",
          "note": "Neugebauer & Van de Walle (1999) - chemical potential dependence of defect formation energies in GaN"
        },
        {
          "doi": "10.1017/CBO9780511805523",
          "note": "Sze & Ng (2007) - Physics of Semiconductor Devices; Fermi level and chemical potential"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/materials-science-thermodynamics/b-semiconductor-doping-fermi-level-chemical-potential.yaml"
    },
    {
      "id": "b-thermoelectric-efficiency-seebeck-onsager",
      "title": "Thermoelectric efficiency is governed by the dimensionless figure of merit zT = S^2 sigma T / kappa, where the Seebeck coefficient S, electrical conductivity sigma, and thermal conductivity kappa are related by the Onsager reciprocal relations of irreversible thermodynamics — the same phenomenological framework that unifies thermoelectric, Peltier, and Thomson effects as off-diagonal elements of a generalized transport coefficient matrix",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Onsager formalism writes the heat flux J_Q and electric current J_e as J_e = L_11 * (-grad mu / T) + L_12 * (-grad T / T^2) and J_Q = L_21 * (-grad mu / T) + L_22 * (-grad T / T^2), where Onsager symmetry requires L_12 = L_21 (= T * sigma * S); the zT figure of merit emerges as zT = L_12^2 / (L_",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-thermoelectric-phonon-glass-electron-crystal"
      ],
      "communication_gap": "Materials scientists optimize thermoelectrics empirically by trial-and-error synthesis while thermodynamicists work with abstract Onsager theory; the connection between zT and Onsager coupling is textbook knowledge in thermoelectrics but thermodynamic upper bounds on zT (beyond the Carnot limit argument) are not routinely cited in synthesis papers.",
      "translation_table": [
        {
          "field_a_term": "Seebeck coefficient S (thermopower) (materials science)",
          "field_b_term": "off-diagonal Onsager coefficient L_12 / (T * sigma) in transport matrix (thermodynamics)",
          "note": "S measures the voltage generated per unit temperature gradient; S = L_12 / (T * L_11) in Onsager notation"
        },
        {
          "field_a_term": "power factor S^2 sigma (materials science)",
          "field_b_term": "squared off-diagonal Onsager coefficient L_12^2 / (T^2 * L_11) (thermodynamics)",
          "note": "Power factor is the electrical power output per temperature gradient; directly proportional to L_12^2"
        },
        {
          "field_a_term": "thermal conductivity kappa = kappa_e + kappa_L (materials science)",
          "field_b_term": "diagonal thermal transport coefficient L_22 (thermodynamics)",
          "note": "Electronic (kappa_e = L_T*T, Wiedemann-Franz) and lattice (kappa_L) contributions enter L_22 additively"
        },
        {
          "field_a_term": "figure of merit zT (materials science)",
          "field_b_term": "coupling coefficient (L_12^2 / L_11*L_22 in Onsager matrix) (thermodynamics)",
          "note": "zT is the coupling strength of the off-diagonal Onsager terms relative to the diagonal; zT = 1 at 50% Carnot efficiency"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRev.37.405",
          "note": "Onsager (1931) - reciprocal relations in irreversible processes I (foundational Onsager paper)"
        },
        {
          "doi": "10.1126/science.1155592",
          "note": "Snyder & Toberer (2008) - complex thermoelectric materials (zT optimization review)"
        },
        {
          "doi": "10.1063/1.1723226",
          "note": "Ioffe (1957) - semiconductor thermoelements and thermoelectric cooling (zT figure of merit)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/materials-science-thermodynamics/b-thermoelectric-efficiency-seebeck-onsager.yaml"
    },
    {
      "id": "b-knot-invariants-x-dna-topology",
      "title": "Knot Invariants x DNA Topology - topoisomerase as knot simplifier\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "DNA in vivo is knotted and catenated due to replication and transcription; topoisomerases catalyze specific topological changes (strand passage, religation) that reduce writhe and linking number - mathematically, they compute knot invariants and perform Reidemeister moves on the DNA knot diagram, ma",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Knot theory and DNA topology developed in parallel for 30 years; topologists developed invariants (Jones polynomial, HOMFLY) while molecular biologists characterized topoisomerases, with limited cross-talk until Sumners, Dean, and Cozzarelli connected them in the late 1980s-1990s.\n",
      "translation_table": [
        {
          "field_a_term": "DNA supercoiling (writhe Wr)",
          "field_b_term": "Writhe of a knot (signed crossing number)",
          "note": "Writhe counts the net excess of positive over negative crossings in a DNA projection; it is the biologically relevant topological invariant that supercoiling relaxation enzymes (Gyrase, Topo I) modify.\n"
        },
        {
          "field_a_term": "Topoisomerase II (type II topoisomerase)",
          "field_b_term": "Strand-passage move (Reidemeister move II)",
          "note": "Topo II passes one DNA double strand through another via a transient break, implementing the Reidemeister Type II move that changes crossing number by 2 - the elementary step in knot simplification.\n"
        },
        {
          "field_a_term": "DNA catenane (two linked circular DNA molecules)",
          "field_b_term": "Hopf link (simplest 2-component link)",
          "note": "Newly replicated circular DNA molecules are interlocked as a catenane; decatenation by Topo II is equivalent to unlinking the Hopf link by strand passage moves.\n"
        },
        {
          "field_a_term": "Knot probability in confined DNA",
          "field_b_term": "Knot spectrum of random closed curves",
          "note": "The probability distribution of knot types in bacteriophage DNA matches the theoretical knot spectrum of random closed curves in confined volume, confirming the topological equilibrium predicted by polymer knot theory.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1038/22605",
          "note": "Rybenkov et al. (1997) - simplification of DNA topology by type II topoisomerases; Nature 388:627"
        },
        {
          "doi": "10.1073/pnas.80.14.4519",
          "note": "Sumners & Whittington (1988) - knots in self-avoiding walks; mathematical framework for DNA knotting"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/math-biology/b-knot-invariants-x-dna-topology.yaml"
    },
    {
      "id": "b-persistence-homology-x-protein-structure",
      "title": "Persistent homology x Protein structure - topological data analysis of folded chains\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Persistent homology (TDA) captures multi-scale topological features (loops = beta-barrels, voids = hydrophobic cores) in protein contact networks and 3D atomic coordinates that are invisible to RMSD or distance-based metrics; persistence diagrams of alpha-helices, beta-sheets, and binding pockets en",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Structural biologists characterizing proteins by crystallography/NMR and topologists developing persistent homology (Edelsbrunner et al. 2000) worked in separate fields; the application of TDA to proteins (Xia and Wei 2014) is recent and protein databases (PDB) are not routinely analyzed with TDA tools — representing a large underexploited opportunity since AlphaFold2-predicted structures number in the hundreds of millions and topological analysis scales well.\n",
      "translation_table": [
        {
          "field_a_term": "protein alpha-helix backbone geometry (structural biology)",
          "field_b_term": "1-cycle (loop) in Vietoris-Rips filtration of atomic coordinates (topology)",
          "note": "Alpha-helices appear as persistent H1 classes (loops) in TDA; their persistence encodes helix length and stability"
        },
        {
          "field_a_term": "protein beta-barrel hydrophobic core (structural biology)",
          "field_b_term": "2-cycle (void) in H2 of protein atomic point cloud (topology)",
          "note": "Beta-barrels produce persistent H2 generators (voids); persistence lifetime correlates with barrel stability"
        },
        {
          "field_a_term": "protein RMSD (root mean square deviation) distance metric (bioinformatics)",
          "field_b_term": "bottleneck distance between persistence diagrams (mathematics)",
          "note": "Bottleneck/Wasserstein distance between persistence diagrams is more sensitive to functionally relevant structural changes than RMSD"
        },
        {
          "field_a_term": "protein contact map (bioinformatics)",
          "field_b_term": "1-skeleton of simplicial complex / filtered graph (topology)",
          "note": "The protein contact map is the 1-skeleton; TDA extends this to higher simplices capturing higher-order structural features"
        }
      ],
      "references": [
        {
          "doi": "10.1038/s41598-020-67423-w",
          "note": "Xia & Wei (2014/2020) - Persistent homology analysis of protein structure, flexibility and folding; Int J Numer Method Biomed Eng"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/math-biology/b-persistence-homology-x-protein-structure.yaml"
    },
    {
      "id": "b-topological-data-analysis-x-cancer-genomics",
      "title": "Topological Data Analysis x Cancer Genomics - persistent homology of mutation landscapes\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Tumor genome somatic mutation patterns form high-dimensional data clouds whose topological features (connected components, loops) reveal cancer subtypes and evolutionary trajectories invisible to clustering methods; TDA of single-cell genomic data identifies cancer stem cell populations as topologic",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Algebraic topology (homology, persistent homology) is rarely taught in biology or bioinformatics; the TDA literature is mathematically demanding; the Nicolau et al. (2011) PNAS paper introduced TDA to cancer genomics but the method has seen limited adoption due to the conceptual gap between topologists and biologists.\n",
      "translation_table": [
        {
          "field_a_term": "Tumor mutation spectrum (somatic variant matrix, patients x genes)",
          "field_b_term": "Point cloud in high-dimensional space",
          "note": "Each tumor is a point in a space of dimension equal to the number of genomic features; the topology of this point cloud (how points cluster, form loops, are connected) encodes the structure of cancer subtype relationships without assuming Euclidean distance.\n"
        },
        {
          "field_a_term": "Cancer subtype boundaries (e.g. luminal A vs basal breast)",
          "field_b_term": "Connected components in the Vietoris-Rips complex (beta_0)",
          "note": "Persistent 0-dimensional homology identifies connected components that persist across many distance thresholds, corresponding to robust cancer subtype clusters; components that appear and die quickly are noise.\n"
        },
        {
          "field_a_term": "Continuous tumor progression (e.g. from adenoma to carcinoma)",
          "field_b_term": "Persistent 1-cycles (loops, beta_1) in the data complex",
          "note": "If tumor evolution produces a cyclic trajectory (e.g. circular progression of mutation states), TDA captures this as a persistent 1-cycle invisible to tree-based phylogenetic methods.\n"
        },
        {
          "field_a_term": "Cancer stem cell population (rare, high-plasticity cells)",
          "field_b_term": "Topological outlier (low persistence, isolated point)",
          "note": "Cancer stem cells occupy distinct positions in gene expression space; their topological outlier status (high eccentricity in the data complex) can be detected by TDA even when they are too rare for clustering methods.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.1102826108",
          "note": "Nicolau, Levine & Carlsson (2011) - topology based data analysis identifies a subgroup of breast cancers with a unique mutational profile and excellent survival; PNAS 108:7265"
        },
        {
          "doi": "10.1038/nbt.2862",
          "note": "Lum et al. (2013) - extracting insights from the shape of complex data using topology; Scientific Reports"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/math-biology/b-topological-data-analysis-x-cancer-genomics.yaml"
    },
    {
      "id": "b-category-theory-x-functional-programming",
      "title": "Category theory x Functional programming - functors as type constructors\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The Curry-Howard-Lambek correspondence establishes a three-way isomorphism between typed lambda calculus, intuitionistic logic, and Cartesian closed categories; monads in Haskell are exactly monads in category theory (a monoid in the category of endofunctors), making functional programming a form of",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Category theorists (Eilenberg & Mac Lane 1945) and computer scientists developing functional programming (McCarthy 1958, Landin 1966) worked independently; the connection through Cartesian closed categories was noted by Lawvere (1969) and Lambek (1980) but only entered practical programming through Haskell monads (Wadler 1992) — decades after the mathematical theory was complete, illustrating the slow transfer of abstract mathematics to programming language design.\n",
      "translation_table": [
        {
          "field_a_term": "Haskell type constructor (e.g. Maybe, List, IO) (programming)",
          "field_b_term": "endofunctor F: C -> C in category theory (mathematics)",
          "note": "Every Haskell Functor instance is a categorical endofunctor; fmap is the action on morphisms (functions)"
        },
        {
          "field_a_term": "Haskell monad (return, bind >>= operations) (programming)",
          "field_b_term": "monad (T, eta, mu) — monoid in endofunctor category (category theory)",
          "note": "return is the unit natural transformation eta, bind encodes the multiplication mu; monad laws are exactly the monoid laws"
        },
        {
          "field_a_term": "polymorphic function (parametric polymorphism) (type theory)",
          "field_b_term": "natural transformation between functors (category theory)",
          "note": "Wadler's theorems for free: every parametrically polymorphic function is a natural transformation by Reynold's parametricity"
        },
        {
          "field_a_term": "Curry-Howard isomorphism (propositions as types, proofs as programs) (logic/CS)",
          "field_b_term": "Lambek correspondence (propositions as objects, proofs as morphisms) (category theory)",
          "note": "The three-way Curry-Howard-Lambek isomorphism unifies logic, type theory, and Cartesian closed categories"
        }
      ],
      "references": [
        {
          "doi": "10.1145/75277.75285",
          "note": "Wadler (1989) - Theorems for free! FPCA 1989; establishes natural transformation interpretation of polymorphism"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/math-cs/b-category-theory-x-functional-programming.yaml"
    },
    {
      "id": "b-expander-graphs-x-error-correcting-codes",
      "title": "Expander Graphs x Error-Correcting Codes - spectral gap as code distance\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Expander graphs (high connectivity, small spectral gap in the Laplacian) are the combinatorial objects underlying modern error-correcting codes; LDPC codes and turbo codes have Tanner graphs that are expanders, and the spectral gap of the Tanner graph determines the code's minimum distance and itera",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Algebraic coding theory (Hamming, BCH, Reed-Solomon codes) and graph expansion theory (Margulis, Lubotzky-Phillips-Sarnak) developed independently; their connection through Tanner codes and LDPC codes was established only in the 1996-2001 period (Sipser-Spielman, Luby et al., Richardson-Urbanke).\n",
      "translation_table": [
        {
          "field_a_term": "Spectral gap (lambda_2 - lambda_1 of graph Laplacian)",
          "field_b_term": "Code minimum distance / threshold for iterative decoding",
          "note": "The Ramanujan bound (spectral gap = 2*sqrt(d-1) - epsilon) characterizes optimal expanders; LDPC codes whose Tanner graphs meet this bound achieve the best-known minimum distance scaling and threshold performance under belief propagation decoding.\n"
        },
        {
          "field_a_term": "Tanner graph (bipartite factor graph of LDPC code)",
          "field_b_term": "Bipartite expander graph",
          "note": "The Tanner graph of an LDPC code is a bipartite graph between variable nodes (codeword bits) and check nodes (parity constraints); its expansion ratio (minimum vertex expansion over all small subsets) controls the minimum distance of the code.\n"
        },
        {
          "field_a_term": "Belief propagation (iterative decoding)",
          "field_b_term": "Expander mixing lemma convergence",
          "note": "Belief propagation converges to the correct codeword when the Tanner graph has sufficient expansion; the expander mixing lemma bounds the number of iterations needed for convergence as a function of spectral gap.\n"
        },
        {
          "field_a_term": "Girth (length of shortest cycle in Tanner graph)",
          "field_b_term": "Locally tree-like structure of expander",
          "note": "High girth Tanner graphs are locally tree-like (no short cycles), enabling belief propagation to behave correctly for short block lengths; girth is related to expansion via the Moore bound.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1109/18.910575",
          "note": "Sipser & Spielman (1996) - expander codes; IEEE Trans Information Theory 42:1710"
        },
        {
          "doi": "10.1109/18.910574",
          "note": "Luby et al. (2001) - improved low-density parity-check codes using irregular graphs; IEEE TIT"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/math-cs/b-expander-graphs-x-error-correcting-codes.yaml"
    },
    {
      "id": "b-fourier-transform-x-signal-processing",
      "title": "Fourier transform x Signal processing — frequency domain as dual representation\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The discrete Fourier transform (DFT) and its fast algorithm (FFT) provide an exact dual representation of any finite signal in the frequency domain; the convolution theorem (multiplication in frequency = convolution in time) reduces O(n^2) correlation to O(n log n) FFT — the mathematical backbone of",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Pure mathematicians studying Fourier analysis and electrical engineers developing signal processing algorithms developed the same theory independently; Cooley and Tukey's 1965 FFT paper was not recognized as rediscovering Gauss's 1805 algorithm until the history was researched afterward, illustrating how disciplinary separation delays algorithm diffusion.\n",
      "translation_table": [
        {
          "field_a_term": "DFT frequency bin X[k] (signal processing)",
          "field_b_term": "Fourier coefficient in orthonormal basis expansion (mathematics)",
          "note": "The DFT decomposes a signal into complex exponential basis functions; this is an exact isometry (Parseval's theorem)"
        },
        {
          "field_a_term": "convolution in time domain (signal processing)",
          "field_b_term": "pointwise multiplication in frequency domain (mathematics)",
          "note": "The convolution theorem is the mathematical fact that the Fourier transform diagonalizes convolution operators"
        },
        {
          "field_a_term": "FFT butterfly algorithm (computer science)",
          "field_b_term": "divide-and-conquer factorization of DFT matrix (mathematics)",
          "note": "The FFT exploits the factorization of the DFT matrix into sparse factors using roots of unity, reducing O(n^2) to O(n log n)"
        },
        {
          "field_a_term": "Nyquist sampling theorem (signal processing)",
          "field_b_term": "Paley-Wiener theorem / bandlimited functions (mathematics)",
          "note": "Shannon's sampling theorem is a consequence of the Paley-Wiener theorem; band-limiting in frequency implies exact reconstruction from samples at 2B Hz"
        }
      ],
      "references": [
        {
          "doi": "10.1090/S0025-5718-1965-0178586-1",
          "note": "Cooley & Tukey (1965) - An algorithm for the machine calculation of complex Fourier series; Math Comp 19:297"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/math-cs/b-fourier-transform-x-signal-processing.yaml"
    },
    {
      "id": "b-tda-x-shape-recognition",
      "title": "Topological Data Analysis x Shape Recognition — Betti numbers as shape fingerprints\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Persistent homology computes Betti numbers (β₀: connected components, β₁: loops, β₂: voids) across all length scales simultaneously, producing a persistence diagram that is a provably stable shape fingerprint; this enables comparison of molecular structures, brain connectivity networks, and material",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Algebraic topology was a pure mathematics discipline with no computational focus; computational geometry developed shape analysis algorithms without topological rigor; persistent homology (Edelsbrunner, Letscher, Zomorodian 2002) created the bridge, but biological and materials science applications came 10 years later as the Ripser software made computation tractable.\n",
      "translation_table": [
        {
          "field_a_term": "Filtration (nested sequence of simplicial complexes)",
          "field_b_term": "Multi-scale shape descriptor",
          "note": "A filtration builds a nested family of topological spaces (Čech, Vietoris-Rips, or alpha complex) parameterized by scale ε; persistent homology tracks which topological features (holes, voids) appear and disappear across scales.\n"
        },
        {
          "field_a_term": "Persistence diagram (birth-death pairs)",
          "field_b_term": "Multi-scale topological fingerprint",
          "note": "Each topological feature (connected component, loop, void) is represented by a point (b, d) in the persistence diagram — birth scale b and death scale d; long-lived features (large d-b) are robust, short-lived features are noise.\n"
        },
        {
          "field_a_term": "Wasserstein/bottleneck stability theorem",
          "field_b_term": "Lipschitz continuity of the shape fingerprint",
          "note": "The stability theorem guarantees that small perturbations of point cloud data produce small perturbations in the persistence diagram (bottleneck distance); this is the mathematical basis for using persistence diagrams as robust features.\n"
        },
        {
          "field_a_term": "Betti numbers β₀, β₁, β₂",
          "field_b_term": "Topological invariants (components, loops, voids)",
          "note": "β₀ counts connected components; β₁ counts independent loops (genus in 2D); β₂ counts enclosed voids in 3D; these are coordinate-free shape descriptors invariant under continuous deformation.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1090/S0273-0979-09-01249-X",
          "note": "Carlsson (2009) — Topology and data; Bull Amer Math Soc 46:255"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/math-cs/b-tda-x-shape-recognition.yaml"
    },
    {
      "id": "b-tropical-geometry-x-neural-networks",
      "title": "Tropical geometry ↔ ReLU neural networks — piecewise-linear maps as tropical polynomials",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "ReLU neural networks compute piecewise-linear functions that are exactly tropical polynomials in tropical (max-plus) algebra; the number of linear regions of a deep ReLU network grows exponentially with depth — a fact provable using tropical geometry — explaining why depth provides exponential repre",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-tropical-geometry-x-neural-networks"
      ],
      "communication_gap": "Tropical geometry was developed in algebraic geometry (Mikhalkin 2004, Speyer & Sturmfels 2004) as a tool for combinatorial and computational algebraic geometry. Neural network expressivity theory was developed in machine learning (Montufar 2014, Telgarsky 2016). The connection was made by Zhang et al. (2018) and Montufar et al. (2014), but remains largely unknown to both algebraic geometers (who don't study deep learning) and machine learning researchers (who rarely study tropical geometry).",
      "translation_table": [
        {
          "field_a_term": "ReLU activation function max(0, x) (neural network)",
          "field_b_term": "tropical addition a ⊕ b = max(a, b) in max-plus algebra",
          "note": "ReLU is exactly the tropical addition applied to a linear function and zero"
        },
        {
          "field_a_term": "piecewise-linear function computed by ReLU network",
          "field_b_term": "tropical polynomial in max-plus semiring",
          "note": "Every ReLU network computes a tropical polynomial; tropical geometry characterises the PL subdivision"
        },
        {
          "field_a_term": "number of linear regions of depth-d width-w ReLU network",
          "field_b_term": "number of facets of the tropical hypersurface (tropical algebraic geometry)",
          "note": "Linear regions count grows as Θ((w/n)^{nd}) for n-input network; tropical geometry proves this"
        },
        {
          "field_a_term": "depth-width trade-off in neural network expressivity",
          "field_b_term": "tropical hypersurface complexity vs. polynomial degree and dimension",
          "note": "Depth exponentially increases linear regions; breadth increases polynomially — proven by tropical methods"
        }
      ],
      "references": [
        {
          "doi": "10.48550/arXiv.1805.07091",
          "note": "Zhang et al. (2018) — tropical geometry of deep neural networks; ICML 2018"
        },
        {
          "doi": "10.48550/arXiv.1402.1869",
          "note": "Montufar et al. (2014) — on the number of linear regions of deep neural networks; NeurIPS 2014"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/math-cs/b-tropical-geometry-x-neural-networks.yaml"
    },
    {
      "id": "b-island-biogeography-x-percolation",
      "title": "Island biogeography ↔ Percolation — species area relationship as connectivity threshold",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The MacArthur-Wilson species-area relationship (S = cA^z) is the biological signature of habitat percolation; below the percolation threshold, habitat patches become disconnected and species go extinct via area effects, while above threshold, the connected habitat cluster supports full diversity — u",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-island-biogeography-x-percolation"
      ],
      "communication_gap": "Island biogeography theory (MacArthur & Wilson 1967) is ecology's most influential quantitative framework, while percolation theory (Broadbent & Hammersley 1957) is a cornerstone of statistical physics. The two communities rarely interact: percolation appears in physics journals and biogeography in ecology journals. The explicit connection was made by With & Crist (1995) and Boswell et al. (1998) but remains largely unknown to mainstream ecologists.",
      "translation_table": [
        {
          "field_a_term": "species richness S = cA^z (island biogeography)",
          "field_b_term": "cluster size distribution in percolation theory",
          "note": "Power law S-A relationship mirrors the power-law cluster size distribution at percolation threshold"
        },
        {
          "field_a_term": "habitat patch (island or fragmented forest patch)",
          "field_b_term": "occupied site in site percolation model",
          "note": "Patch occupancy probability p maps to site occupation probability in bond percolation"
        },
        {
          "field_a_term": "species extinction below minimum viable area (MVA)",
          "field_b_term": "cluster isolation below percolation threshold p_c",
          "note": "Both predict abrupt collapse: species extinctions and cluster disconnection are threshold phenomena"
        },
        {
          "field_a_term": "immigration-extinction balance (MacArthur-Wilson equilibrium)",
          "field_b_term": "steady-state cluster dynamics in dynamic percolation",
          "note": "Island biogeography equilibrium is the ecological analogue of steady-state percolation"
        }
      ],
      "references": [
        {
          "doi": "10.2307/1934358",
          "note": "MacArthur & Wilson (1967) — The Theory of Island Biogeography"
        },
        {
          "doi": "10.1007/BF02395592",
          "note": "Broadbent & Hammersley (1957) — Percolation processes I; Proc Cambridge Phil Soc 53:629"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/math-ecology/b-island-biogeography-x-percolation.yaml"
    },
    {
      "id": "b-lotka-volterra-x-game-theory",
      "title": "Lotka-Volterra x Evolutionary game theory — predator-prey as hawk-dove\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The Lotka-Volterra predator-prey equations and the replicator dynamics of evolutionary game theory are related by a coordinate transformation; the hawk-dove game's mixed Nash equilibrium corresponds to the Lotka-Volterra coexistence fixed point, unifying ecological population cycles with strategic g",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Ecologists studying predator-prey dynamics and game theorists studying evolutionary strategies use different notation and publish in different journals; the formal equivalence between replicator dynamics and Lotka-Volterra was established by Hofbauer and Sigmund but is not widely known outside mathematical biology.\n",
      "translation_table": [
        {
          "field_a_term": "predator-prey population cycle (ecology)",
          "field_b_term": "oscillation around mixed Nash equilibrium (game theory)",
          "note": "The neutrally stable oscillations in Lotka-Volterra correspond to limit cycles in replicator dynamics for zero-sum games"
        },
        {
          "field_a_term": "ecological coexistence fixed point (ecology)",
          "field_b_term": "mixed Nash equilibrium (game theory)",
          "note": "The interior fixed point of Lotka-Volterra (coexistence) maps to the mixed strategy Nash equilibrium of the hawk-dove game"
        },
        {
          "field_a_term": "intrinsic growth rate r of prey (ecology)",
          "field_b_term": "payoff differential between strategies (game theory)",
          "note": "The fitness difference between strategies drives frequency dynamics just as growth rate imbalance drives population cycles"
        },
        {
          "field_a_term": "carrying capacity K (ecology)",
          "field_b_term": "total population size constraint (game theory)",
          "note": "Both set the resource limitation that prevents unbounded growth of one strategy or species"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rspb.1973.0005",
          "note": "Maynard Smith & Price (1973) - The logic of animal conflict; Proc R Soc B 246:15; foundational evolutionary game theory"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/math-ecology/b-lotka-volterra-x-game-theory.yaml"
    },
    {
      "id": "b-percolation-x-disease-spread",
      "title": "Percolation theory x Epidemic spreading — connectivity threshold as herd immunity\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The SIR epidemic threshold (R0 = 1) is identical to the bond percolation critical probability on the contact network; herd immunity corresponds to the network falling below the percolation threshold, making the giant connected component subcritical.\n",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Mathematical percolation theory developed in statistical physics; epidemiology developed with different modeling traditions. The formal equivalence was established by Newman (2002) but is not standard in epidemiology curricula.\n",
      "translation_table": [
        {
          "field_a_term": "Bond percolation critical probability p_c",
          "field_b_term": "Epidemic threshold 1/R0",
          "note": "Removing bonds with probability (1 - 1/R0) destroys the giant component, exactly corresponding to achieving herd immunity threshold in the SIR model.\n"
        },
        {
          "field_a_term": "Giant connected component (GCC)",
          "field_b_term": "Epidemic final size (proportion infected)",
          "note": "The fraction of the population in the GCC equals the final epidemic attack rate; both exhibit power-law scaling near the critical threshold.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevE.66.016128",
          "note": "Newman (2002) — spread of epidemic disease on networks; percolation equivalence"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/math-ecology/b-percolation-x-disease-spread.yaml"
    },
    {
      "id": "b-auction-theory-x-mechanism-design",
      "title": "Auction theory x Mechanism design — revenue equivalence as envelope theorem\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The revenue equivalence theorem proves that all standard auction formats (English, Dutch, sealed-bid first-price, second-price Vickrey) yield the same expected revenue given symmetric independent private values; this is a consequence of the envelope theorem in mechanism design, connecting auction th",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Economists developing auction theory and mathematicians developing mechanism design and optimal control theory use the same mathematical tools (envelope theorem, revelation principle, calculus of variations) but the connection between Myerson's virtual value formula and the Pontryagin maximum principle from optimal control theory is rarely made explicit in economics textbooks.\n",
      "translation_table": [
        {
          "field_a_term": "second-price (Vickrey) auction (economics)",
          "field_b_term": "incentive-compatible (strategy-proof) mechanism (mechanism design)",
          "note": "Truthful bidding is a dominant strategy in Vickrey auctions; this is the defining property of a strategy-proof direct revelation mechanism"
        },
        {
          "field_a_term": "bidder's private value v_i (auction theory)",
          "field_b_term": "type in mechanism design (mathematics)",
          "note": "The bidder's private value is their type; mechanism design studies optimal mechanisms over the distribution of types"
        },
        {
          "field_a_term": "revenue equivalence theorem (auction theory)",
          "field_b_term": "envelope theorem in calculus of variations (mathematics)",
          "note": "Revenue equivalence follows from the envelope theorem applied to the bidder's indirect utility function; only the allocation rule determines expected revenue"
        },
        {
          "field_a_term": "optimal auction / Myerson auction (economics)",
          "field_b_term": "virtual value function phi(v) = v - (1-F(v))/f(v) (mathematics)",
          "note": "Myerson's optimal auction uses virtual values to characterize the revenue-maximizing allocation rule — a pure mathematical object"
        }
      ],
      "references": [
        {
          "doi": "10.2307/1911865",
          "note": "Myerson (1981) - Optimal auction design; Mathematics of Operations Research 6:58"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/math-economics/b-auction-theory-x-mechanism-design.yaml"
    },
    {
      "id": "b-extreme-value-theory-x-risk-modeling",
      "title": "Extreme Value Theory x Risk Modeling — Gumbel distribution as tail statistics\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Extreme value theory (Fisher-Tippett-Gnedenko theorem) proves that maxima of iid random variables converge to one of three distributions (Gumbel, Fréchet, Weibull) regardless of the underlying distribution; this universality underpins all financial VaR models, flood risk assessment, and structural f",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "EVT was developed by statisticians and engineers for infrastructure design (flood levees, wind loads) decades before finance adopted VaR in the 1990s; regulatory capital requirements (Basel III) incorporate EVT through Expected Shortfall but many practitioners remain unaware of the full mathematical foundation.\n",
      "translation_table": [
        {
          "field_a_term": "Generalized extreme value (GEV) distribution",
          "field_b_term": "Limiting distribution of block maxima",
          "note": "The GEV unifies Gumbel (ξ=0), Fréchet (ξ>0), and Weibull (ξ<0) distributions; the shape parameter ξ (tail index) determines whether the tail is light, heavy, or bounded.\n"
        },
        {
          "field_a_term": "Fréchet domain of attraction (heavy tails)",
          "field_b_term": "Power-law tail in financial returns",
          "note": "Stock returns with power-law tails (Pareto-like) lie in the Fréchet domain, implying infinite variance for ξ > 0.5; this invalidates Gaussian VaR models for assets with fat-tailed return distributions.\n"
        },
        {
          "field_a_term": "Return period T for extreme events",
          "field_b_term": "Value at Risk (VaR) at confidence level 1-1/T",
          "note": "A 100-year flood corresponds to the 99th percentile of annual maxima; VaR at 99% is the 1-year return level — direct mapping between hydrology and finance.\n"
        },
        {
          "field_a_term": "Generalized Pareto distribution (GPD) for exceedances",
          "field_b_term": "Peaks-over-threshold model for tail risk",
          "note": "The Pickands-Balkema-de Haan theorem shows that exceedances above a high threshold converge to GPD; this enables efficient tail estimation using fewer data points than block maxima methods.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1017/CBO9780511870156",
          "note": "Coles (2001) — An Introduction to Statistical Modeling of Extreme Values; Cambridge University Press"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/math-economics/b-extreme-value-theory-x-risk-modeling.yaml"
    },
    {
      "id": "b-voting-theory-x-social-choice",
      "title": "Voting Theory x Social Choice — Arrow's impossibility as topological obstruction\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Arrow's impossibility theorem (no voting system satisfies all fairness axioms simultaneously) has a topological proof: the space of preference profiles is a simplex, and the aggregation map must have a fixed point that violates at least one axiom; this connects social choice theory to algebraic topo",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Arrow proved his theorem in 1951 using combinatorial axiomatics; algebraic topology developed separately; Baryshnikov's topological proof (1993) connecting Arrow to Borsuk-Ulam went largely unnoticed in economics for decades, despite providing far more geometric insight into which axiom combinations are feasible.\n",
      "translation_table": [
        {
          "field_a_term": "Social preference ordering (Arrow's axioms)",
          "field_b_term": "Continuous map from preference simplex to itself",
          "note": "Baryshnikov (1993) showed Arrow's theorem follows from the Borsuk-Ulam theorem: any continuous aggregation of n individual preferences into a social preference must violate Independence of Irrelevant Alternatives or Pareto efficiency — a fixed-point/no-retraction result in algebraic topology.\n"
        },
        {
          "field_a_term": "Independence of Irrelevant Alternatives (IIA)",
          "field_b_term": "Locality condition on the aggregation map",
          "note": "IIA requires that the social ranking of any two alternatives depends only on individual rankings of those two alternatives; topologically, this is a local/product structure condition on the aggregation map.\n"
        },
        {
          "field_a_term": "Condorcet cycle (no majority winner)",
          "field_b_term": "Non-contractible loop in preference space",
          "note": "A Condorcet cycle (A beats B, B beats C, C beats A) corresponds to a non-contractible loop in the space of pairwise preferences, explaining why majority rule cannot always produce a consistent winner.\n"
        },
        {
          "field_a_term": "Dictatorship (one voter determines outcome)",
          "field_b_term": "Fixed point of aggregation map (Brouwer's theorem)",
          "note": "Arrow's theorem proves dictatorship is the only aggregation satisfying all axioms; topologically, Brouwer's fixed point theorem guarantees a fixed point that corresponds to dictatorial choice.\n"
        }
      ],
      "references": [
        {
          "doi": "10.2307/1907435",
          "note": "Arrow (1950) — A difficulty in the concept of social welfare; J Political Economy 58:328"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/math-economics/b-voting-theory-x-social-choice.yaml"
    },
    {
      "id": "b-chaos-x-ergodic-theory",
      "title": "Chaos x Ergodic theory - sensitivity as mixing\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Deterministic chaos (positive Lyapunov exponents, sensitive dependence on initial conditions) is the physical manifestation of ergodic mixing in measure-preserving dynamical systems; the Kolmogorov-Sinai entropy h_KS equals the sum of positive Lyapunov exponents (Pesin's theorem 1977), connecting ch",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Physicists studying chaos (Lorenz 1963, Ruelle & Takens 1971) and mathematicians developing ergodic theory (Birkhoff 1931, Kolmogorov 1958) built largely separate frameworks; Pesin's theorem (1977) bridged them but required graduate-level ergodic theory for physical scientists — meaning practical chaos analysis (Lyapunov exponents, correlation dimension) and rigorous ergodic theory (mixing, SRB measures) are often applied without reference to each other in applied sciences.\n",
      "translation_table": [
        {
          "field_a_term": "Lyapunov exponent lambda_1 > 0 (chaos theory)",
          "field_b_term": "KS entropy h_KS = sum of positive Lyapunov exponents (ergodic theory)",
          "note": "Pesin's theorem equates KS entropy with positive Lyapunov exponents; chaos = positive information production rate"
        },
        {
          "field_a_term": "sensitive dependence on initial conditions (SDIC) (chaos theory)",
          "field_b_term": "exponential mixing of measure in ergodic system (ergodic theory)",
          "note": "SDIC in phase space corresponds to exponential decorrelation of measures; mixing is the rigorous mathematical definition of chaos"
        },
        {
          "field_a_term": "Poincare recurrence theorem (ergodic theory)",
          "field_b_term": "near-recurrence in chaotic trajectories (chaos theory)",
          "note": "Poincare recurrence guarantees eventual return for ergodic systems; chaotic trajectories show recurrence but with exponentially long recurrence times"
        },
        {
          "field_a_term": "strange attractor with fractal dimension (chaos theory)",
          "field_b_term": "ergodic measure supported on fractal invariant set (ergodic theory)",
          "note": "Strange attractors are invariant sets supporting the SRB (Sinai-Ruelle-Bowen) ergodic measure; fractal dimension quantifies the measure's support"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF01646553",
          "note": "Ruelle & Takens (1971) - On the nature of turbulence; Commun Math Phys 20:167 — strange attractors and chaos"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/math-physics/b-chaos-x-ergodic-theory.yaml"
    },
    {
      "id": "b-ergodic-theory-x-statistical-mechanics",
      "title": "Ergodic Theory x Statistical Mechanics - time average equals ensemble average\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The ergodic hypothesis (time averages equal ensemble averages for generic initial conditions) is the mathematical foundation of statistical mechanics; Birkhoff's ergodic theorem proves this for measure-preserving dynamical systems, while KAM theory shows many real systems are non-ergodic (integrable",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Boltzmann postulated ergodicity as a physical hypothesis in 1870; Birkhoff proved it mathematically in 1931; KAM theory (1954-1963) showed its failure in integrable systems; but the thermodynamic implications of non-ergodicity (many-body localization, integrable quantum systems) are still being worked out in condensed matter physics 160 years after Boltzmann.\n",
      "translation_table": [
        {
          "field_a_term": "Ergodic hypothesis (Boltzmann, Maxwell)",
          "field_b_term": "Birkhoff ergodic theorem (time average = space average a.e.)",
          "note": "Birkhoff (1931) proved that for measure-preserving flows, the time average of any integrable function converges to its space average almost everywhere - the rigorous mathematical statement of Boltzmann's ergodic hypothesis.\n"
        },
        {
          "field_a_term": "Microcanonical ensemble (uniform measure on energy surface)",
          "field_b_term": "Ergodic measure (invariant measure of the flow)",
          "note": "The microcanonical ensemble weight is the Liouville measure restricted to the energy surface; ergodicity requires this to be the unique invariant measure, which holds for generic Hamiltonians but fails for integrable ones.\n"
        },
        {
          "field_a_term": "KAM tori (quasi-periodic orbits in integrable systems)",
          "field_b_term": "Invariant tori (non-ergodic subsets of phase space)",
          "note": "KAM theory shows that integrable tori persist under small perturbations; orbits on these tori are quasi-periodic, not ergodic - they never explore the full energy surface and hence violate ergodicity.\n"
        },
        {
          "field_a_term": "Lyapunov exponents (rate of separation of nearby trajectories)",
          "field_b_term": "Pesin entropy formula (ergodic theory measure of chaos)",
          "note": "Positive Lyapunov exponents imply exponential mixing (ergodic behavior on relevant timescales); Pesin's formula relates the Kolmogorov-Sinai entropy to the sum of positive Lyapunov exponents.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.73.2875",
          "note": "Tabor (1989) / Benettin et al. (1984) - KAM theory and breakdown of ergodicity; chaos and non-ergodicity"
        },
        {
          "doi": "10.1016/j.physrep.2014.02.007",
          "note": "D'Alessio et al. (2016) - from quantum chaos and eigenstate thermalization to statistical mechanics; Physics Reports"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/math-physics/b-ergodic-theory-x-statistical-mechanics.yaml"
    },
    {
      "id": "b-knot-theory-x-quantum-gravity",
      "title": "Knot theory x Quantum gravity - Wilson loops as topological invariants\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "In Chern-Simons topological quantum field theory and loop quantum gravity, Wilson loop observables W_gamma[A] = Tr P exp(i oint_gamma A) around closed paths gamma correspond exactly to knot invariants (Jones polynomial, HOMFLY polynomial, Kauffman bracket); the Chern-Simons path integral with gauge ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Topologists developing knot invariants (Jones 1984, HOMFLY 1985) and physicists developing quantum gravity and topological field theory worked independently until Witten (1989) identified the Jones polynomial as a Chern-Simons path integral — winning the Fields Medal; but the full implications for quantum gravity (whether LQG spin networks encode complete topological invariants of spacetime) and for quantum computing (topological quantum computation using anyons) are still being developed.\n",
      "translation_table": [
        {
          "field_a_term": "Wilson loop observable W_gamma in Chern-Simons gauge theory (physics)",
          "field_b_term": "Jones polynomial V(K;t) evaluated at t = exp(2*pi*i/(k+2)) (mathematics)",
          "note": "Witten (1989) showed Wilson loops in CS theory with level k compute Jones polynomials; knot topology = quantum observable"
        },
        {
          "field_a_term": "Chern-Simons action S_CS = k/4pi int Tr(A dA + 2/3 A^3) (quantum field theory)",
          "field_b_term": "topological invariant of 3-manifold with embedded knot (topology)",
          "note": "The CS partition function is a topological invariant; changing the 3-manifold changes the invariant computed by the path integral"
        },
        {
          "field_a_term": "loop states in loop quantum gravity (LQG) (quantum gravity)",
          "field_b_term": "knot classes of embedded graphs in 3-space (knot theory)",
          "note": "LQG quantum states of geometry are spin networks; their topology is classified by knot invariants of the embedded graphs"
        },
        {
          "field_a_term": "Reidemeister moves preserving knot type (knot theory)",
          "field_b_term": "gauge invariance of Wilson loop observables (gauge theory)",
          "note": "Reidemeister moves are the physical gauge transformations; knot invariance = gauge invariance of the observable"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF01217730",
          "note": "Witten (1989) - Quantum field theory and the Jones polynomial; Commun Math Phys 121:351"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/math-physics/b-knot-theory-x-quantum-gravity.yaml"
    },
    {
      "id": "b-lie-groups-x-symmetry-conservation",
      "title": "Lie groups x Conservation laws — Noether's theorem as group representation\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Every continuous symmetry of a physical system (described by a Lie group action on the configuration space) corresponds to a conserved quantity via Noether's theorem; U(1) phase symmetry yields charge conservation, SO(3) rotational symmetry yields angular momentum, time translation symmetry yields e",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Pure mathematicians developing Lie group representation theory and physicists discovering conservation laws worked in largely separate communities until the 20th century; Noether's 1915 theorem was not widely applied in physics until gauge theory was developed in the 1950s, and the full power of representation theory for particle physics only became evident with the Standard Model.\n",
      "translation_table": [
        {
          "field_a_term": "Lie group generator (Lie algebra element) (mathematics)",
          "field_b_term": "conserved Noether charge (physics)",
          "note": "Each generator of the symmetry Lie algebra corresponds to a conserved quantity; the Lie algebra structure constants encode commutation relations of conserved charges"
        },
        {
          "field_a_term": "U(1) gauge symmetry (mathematics / physics)",
          "field_b_term": "electric charge conservation (physics)",
          "note": "The invariance of the Lagrangian under U(1) phase rotations directly yields charge conservation via Noether's first theorem"
        },
        {
          "field_a_term": "SO(3) rotation group representation (mathematics)",
          "field_b_term": "quantized angular momentum eigenvalues (quantum mechanics)",
          "note": "Irreducible representations of SO(3) label quantum states by angular momentum j; the representation theory dictates the spectrum"
        },
        {
          "field_a_term": "spontaneous symmetry breaking (Lie group theory)",
          "field_b_term": "Goldstone boson / mass generation (physics)",
          "note": "When a continuous symmetry is spontaneously broken, Goldstone's theorem (Lie algebra) predicts massless modes; explicit breaking gives the Higgs mechanism"
        }
      ],
      "references": [
        {
          "doi": "10.1080/00411457108231446",
          "note": "Noether (1971, English translation) - Invariant variation problems; Transport Theory and Statistical Physics 1:186"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/math-physics/b-lie-groups-x-symmetry-conservation.yaml"
    },
    {
      "id": "b-morse-theory-x-energy-landscape",
      "title": "Morse theory ↔ Energy landscapes — critical points as saddles and minima",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Morse theory classifies the topology of smooth manifolds through the critical points of a smooth function (minima, saddles, maxima); applied to potential energy surfaces in chemistry and physics, Morse theory provides a complete topological inventory of reaction pathways, metastable states, and tran",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-morse-theory-x-energy-landscape"
      ],
      "communication_gap": "Morse theory was developed in pure differential topology (Morse 1934, Milnor 1963); potential energy surfaces were developed independently in physical chemistry (London, Eyring, Evans-Polanyi 1930s). Chemists and physicists studying energy landscapes were unaware of the complete topological characterization offered by Morse theory until the work of Wales (2003) on 'Energy Landscapes' and the adoption of topological data analysis in computational chemistry.",
      "translation_table": [
        {
          "field_a_term": "Morse function f: M → R (smooth function with nondegenerate critical points)",
          "field_b_term": "potential energy surface V(x) of molecular or protein system",
          "note": "PES is a Morse function generically; Morse critical points = equilibria and transition states"
        },
        {
          "field_a_term": "Morse index μ of critical point (number of negative Hessian eigenvalues)",
          "field_b_term": "number of imaginary frequencies at transition state (saddle index)",
          "note": "μ = 0: minimum (stable state); μ = 1: saddle (transition state); μ = 2: hilltop"
        },
        {
          "field_a_term": "Morse-Witten complex (gradient flow lines connecting critical points)",
          "field_b_term": "reaction pathway network connecting stable states via transition states",
          "note": "Gradient flow lines on PES correspond to reaction paths; Morse-Witten complex is the kinetic network"
        },
        {
          "field_a_term": "Morse inequality: #{critical pts of index k} ≥ βₖ (Betti number)",
          "field_b_term": "minimum number of transition states required by topology of PES",
          "note": "Morse inequalities give lower bounds on the number of saddles on a PES from topology"
        }
      ],
      "references": [
        {
          "doi": "10.1515/9781400881802",
          "note": "Milnor (1963) — Morse Theory; Princeton University Press — foundational Morse theory text"
        },
        {
          "doi": "10.1039/b210331a",
          "note": "Wales (2003) — Energy Landscapes; Cambridge University Press"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/math-physics/b-morse-theory-x-energy-landscape.yaml"
    },
    {
      "id": "b-origami-math-x-structural-engineering",
      "title": "Origami Mathematics x Structural Engineering — crease patterns as deployable mechanisms\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Rigid origami (flat-foldable crease patterns satisfying Kawasaki's theorem and Maekawa's theorem) provides deployable mechanical structures with prescribed folding kinematics; the stiffness and Poisson's ratio of origami metamaterials are determined by the combinatorics of the crease pattern, enabli",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Pure mathematicians studied origami for geometric theorem proving (angle trisection); structural engineers studied compliant mechanisms and deployable structures; the connection (origami mathematics = mechanism design) was made by Robert Lang's TreeMaker algorithm (1994) and accelerated by NASA's need for compact deployable space structures.\n",
      "translation_table": [
        {
          "field_a_term": "Kawasaki's theorem (alternating angle sum = π)",
          "field_b_term": "Kinematic constraint for single-vertex flat foldability",
          "note": "At each interior vertex, the alternating sum of sector angles equals π (flat foldability condition); this is a constraint on the mechanism degrees of freedom, preventing kinematic locking.\n"
        },
        {
          "field_a_term": "Maekawa's theorem (M-V assignment ±2)",
          "field_b_term": "Parity constraint on crease orientation",
          "note": "The number of mountain and valley creases at each vertex must differ by exactly 2; this is a necessary condition for flat foldability and constrains the configuration space of the origami mechanism.\n"
        },
        {
          "field_a_term": "Miura-ori crease pattern",
          "field_b_term": "Deployable structure with negative Poisson's ratio",
          "note": "The Miura-ori pattern (parallelogram tessellation) has negative in-plane Poisson's ratio (auxetic behavior): it expands in one direction when stretched in the other; this arises directly from the geometry of the crease pattern.\n"
        },
        {
          "field_a_term": "Origami degree of freedom (DOF)",
          "field_b_term": "Mechanism mobility in structural engineering",
          "note": "The mobility formula (Maxwell criterion) counts DOF as edges - constraints; single-DOF origami patterns are deployable mechanisms; zero-DOF patterns are rigid structures.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rspa.2015.0745",
          "note": "Schenk & Guest (2013) — Geometry of Miura-folded metamaterials; Proc R Soc A 469:20120433"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/math-physics/b-origami-math-x-structural-engineering.yaml"
    },
    {
      "id": "b-random-walk-x-brownian-motion",
      "title": "Random walk x Brownian motion — discrete to continuum limit\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The continuum limit of a symmetric random walk on a lattice is Brownian motion (Wiener process); Donsker's invariance principle (functional central limit theorem) proves that this convergence holds universally for any finite-variance step distribution, unifying discrete combinatorics with continuous",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Probabilists studying random walks and physicists studying Brownian motion developed parallel mathematical frameworks; Donsker's 1951 theorem providing the rigorous convergence proof was not widely cited in physics literature until the development of stochastic calculus in finance made the connection essential.\n",
      "translation_table": [
        {
          "field_a_term": "random walk step on lattice (combinatorics)",
          "field_b_term": "Wiener process increment dW_t (stochastic calculus)",
          "note": "In the limit of small step size and time, the random walk distribution converges to the Gaussian distribution of Brownian increments"
        },
        {
          "field_a_term": "number of steps N (combinatorics)",
          "field_b_term": "continuous time t (stochastic calculus)",
          "note": "Rescaling by N^(1/2) in space and N in time yields the continuum limit; this is Donsker's theorem"
        },
        {
          "field_a_term": "Pascal's triangle / binomial distribution (combinatorics)",
          "field_b_term": "Gaussian heat kernel / Green's function (PDE theory)",
          "note": "The binomial distribution of random walk positions converges to the Gaussian under CLT, which is the fundamental solution of the diffusion equation"
        },
        {
          "field_a_term": "self-avoiding random walk (combinatorics / polymer physics)",
          "field_b_term": "excluded volume polymer conformation statistics (physics)",
          "note": "Self-avoiding walk exponent nu = 3/5 (Flory) deviates from nu = 1/2 for Brownian motion, capturing polymer swelling"
        }
      ],
      "references": [
        {
          "doi": "10.1090/S0002-9947-1951-0040613-0",
          "note": "Donsker (1951) - An invariance principle for certain probability limit theorems; Mem AMS 6:1"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/math-physics/b-random-walk-x-brownian-motion.yaml"
    },
    {
      "id": "b-stochastic-resonance-x-signal-detection",
      "title": "Stochastic resonance x Signal detection — noise-enhanced threshold crossing\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Stochastic resonance — where adding noise to a subthreshold signal improves detection — is the physical mechanism behind mechanoreceptor hair cell bundle noise and neural population coding; the optimal noise level is predicted by the signal-to-noise ratio at the detection threshold.\n",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Stochastic resonance was discovered in climate physics (Benzi 1981) and later applied to neural systems; the quantitative connection to hair cell mechanics required experiments not completed until the late 1990s.\n",
      "translation_table": [
        {
          "field_a_term": "Optimal noise amplitude sigma_opt",
          "field_b_term": "Hair cell bundle thermal noise level",
          "note": "Both optimize at the same value: sigma_opt = signal amplitude / sqrt(2); hair cell thermal noise appears tuned near this optimum.\n"
        },
        {
          "field_a_term": "Threshold crossing rate vs noise curve (SR peak)",
          "field_b_term": "Neural detection probability vs spontaneous firing rate",
          "note": "The SR peak in physical systems corresponds to the non-monotonic dependence of detection sensitivity on spontaneous activity in sensory neurons.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1038/365337a0",
          "note": "Levin & Miller (1996) — broadband neural encoding of weak signals via noise — stochastic resonance in hair cells"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/math-physics/b-stochastic-resonance-x-signal-detection.yaml"
    },
    {
      "id": "b-fisher-kpp-fronts-x-wound-healing-closure-forecasting",
      "title": "Fisher-KPP traveling-front analysis can transfer from population dynamics to wound closure forecasting.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Fisher-KPP traveling-front analysis can transfer from population dynamics to wound closure forecasting.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-fisher-kpp-front-models-improve-wound-closure-time-forecasting"
      ],
      "communication_gap": "Domain-specific vocabularies and benchmark conventions obscure transferable mathematical structure.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1046/j.1461-0248.2003.00503.x",
          "note": "Population-dynamics baseline supporting spread-rate and coexistence modeling context."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/mathematical-biology-medicine/b-fisher-kpp-fronts-x-wound-healing-closure-forecasting.yaml"
    },
    {
      "id": "b-allometry-fractal-networks",
      "title": "West-Brown-Enquist fractal network model ↔ metabolic scaling: Kleiber's law from geometry alone",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Kleiber (1932) observed that basal metabolic rate B scales with body mass M as B ~ M^{3/4} across 20 orders of magnitude of body mass (from bacteria to blue whales). This 3/4-power law defied explanation for 60 years. West, Brown & Enquist (1997, Science) derived the exponent purely from geometry: t",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-kleiber-wave-physics",
        "h-allometric-rg-fixed-point"
      ],
      "communication_gap": "West, Brown & Enquist published in Science (1997) where the audience was broad, but the mathematical derivation (involving fractal geometry and optimal transport theory) is opaque to most biologists. The connection to Murray's law (1926, a hydraulics result) was known to physiologists but not connected to metabolic scaling. Plant ecologists use the WBE model routinely but rarely connect it to the physics of optimal transport or the mathematics of fractal dimension theory.\n",
      "translation_table": [
        {
          "field_a_term": "fractal network branching ratio",
          "field_b_term": "vascular bifurcation angle / Murray's law",
          "note": "Murray's law (r_parent^3 = sum r_child^3) is the optimization condition"
        },
        {
          "field_a_term": "space-filling fractal dimension",
          "field_b_term": "effective network dimensionality d=4",
          "note": "The extra dimension captures the fractal filling of 3D volume"
        },
        {
          "field_a_term": "terminal unit size invariance",
          "field_b_term": "capillary and mitochondrion size fixed across species",
          "note": "Sets the absolute normalization; only exponent is universal"
        },
        {
          "field_a_term": "3/4 scaling exponent",
          "field_b_term": "Kleiber's law B ~ M^{3/4}",
          "note": "Derived, not fitted; zero free parameters after fixing d=4"
        },
        {
          "field_a_term": "-1/4 scaling of rates",
          "field_b_term": "heart rate, respiratory rate, cellular division rate ~ M^{-1/4}",
          "note": "All time scales set by network transport time"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.276.5309.122",
          "note": "West, Brown & Enquist (1997) Science - original WBE model deriving 3/4 exponent"
        },
        {
          "doi": "10.1126/science.284.5420.1677",
          "note": "West et al. (1999) Science - extension to plant scaling"
        },
        {
          "doi": "10.1073/pnas.0402680101",
          "note": "Banavar et al. (2002) PNAS - alternative derivation via optimal transport"
        },
        {
          "doi": "10.1007/BF00290540",
          "note": "Murray (1926) - Murray's law for vascular branching (historical)"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/mathematics-biology/b-allometry-fractal-networks.yaml"
    },
    {
      "id": "b-evolutionary-graph-fixation-probability",
      "title": "The fixation probability of a mutant in a structured population is governed by the topology of the evolutionary graph: Lieberman, Hauert & Nowak (2005) proved that certain graph topologies act as amplifiers of selection (suppressing drift) while others suppress selection (amplifying drift), with complete graphs recovering the Moran process fixation probability ρ = (1 − 1/r)/(1 − 1/r^N).\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "In the classical Moran process, a mutant with fitness r in a population of N individuals fixes with probability ρ_Moran = (1 − 1/r)/(1 − 1/r^N). When individuals occupy nodes of a graph and reproduction/replacement follows the graph edges, the fixation probability changes. Lieberman et al. (2005) sh",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-social-network-star-topology-innovation-fixation"
      ],
      "communication_gap": "The Moran process is a standard tool in population genetics textbooks. Graph-structured evolutionary dynamics (Lieberman 2005) bridged population genetics and graph theory in Nature but has been slowly adopted in empirical evolutionary biology. Phylogeographers studying geographic structure and population geneticists rarely use graph-theoretic amplifier/suppressor concepts.\n",
      "translation_table": [
        {
          "field_a_term": "Graph vertex = individual in the structured population",
          "field_b_term": "Individual in spatially structured population genetics model",
          "note": "Graph topology encodes the connectivity (migration/competition) structure"
        },
        {
          "field_a_term": "Star graph: amplifier of selection",
          "field_b_term": "Hierarchically structured population favouring beneficial mutant fixation",
          "note": "ρ_star > ρ_Moran for r > 1; selection is more effective on star than on complete graph"
        },
        {
          "field_a_term": "Complete graph = Moran process (all-to-all competition)",
          "field_b_term": "Well-mixed (unstructured) population with random mating",
          "note": "Identical fixation probability; spatial structure reduces to mean-field limit"
        },
        {
          "field_a_term": "Fixation probability as function of graph eigenspectrum",
          "field_b_term": "Effective population size N_e as function of migration matrix",
          "note": "Both relate population-genetic outcomes to algebraic graph properties"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature03277",
          "note": "Lieberman, Hauert & Nowak (2005) Nature – evolutionary dynamics on graphs; amplifiers and suppressors"
        },
        {
          "doi": "10.1126/science.1133755",
          "note": "Nowak (2006) Science – five rules for the evolution of cooperation on graphs"
        },
        {
          "doi": "10.1073/pnas.1100921108",
          "note": "Shakarian et al. – review of evolutionary graph theory; fixation probabilities on diverse topologies"
        },
        {
          "doi": "10.1038/s41586-021-04440-z",
          "note": "Tkadlec et al. (2021) – limits on amplifiers of natural selection under death-birth updating"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-biology/b-evolutionary-graph-fixation-probability.yaml"
    },
    {
      "id": "b-graph-theory-phylogenetics",
      "title": "Phylogenetic trees are rooted Cayley trees — graph-theoretic objects — and maximum likelihood phylogenetics maximizes P(sequences|tree, model) over a combinatorially vast tree topology space of (2n-3)!! topologies, making exact search NP-hard and requiring heuristic graph algorithms from combinatorics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A rooted bifurcating phylogenetic tree for n taxa is a Cayley tree — a graph with n leaves, n-1 internal nodes, and 2n-2 edges, with the property that each internal node has exactly 3 incident edges (rooted: 2 children + 1 parent). The number of distinct rooted bifurcating tree topologies = (2n-3)!!",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-tree-topology-search-polynomial-approximation"
      ],
      "communication_gap": "Phylogenetics textbooks (Felsenstein 2004) develop the combinatorics from first principles without reference to graph theory literature. Combinatorics and graph theory conferences do not typically include phylogenetics sessions. The algebraic and tropical geometry approaches to phylogenetics are known in mathematical biology but have not been adopted by the software (RAxML, IQ-TREE, MrBayes) used by practicing evolutionary biologists.\n",
      "translation_table": [
        {
          "field_a_term": "Cayley tree (labeled rooted tree)",
          "field_b_term": "rooted bifurcating phylogenetic tree",
          "note": "each topology is a distinct Cayley tree; branch lengths add metric structure"
        },
        {
          "field_a_term": "number of labeled rooted trees on n vertices = (2n-3)!!",
          "field_b_term": "number of possible evolutionary histories for n taxa",
          "note": "the combinatorial explosion that makes phylogenetics computationally hard"
        },
        {
          "field_a_term": "minimum spanning tree (MST)",
          "field_b_term": "maximum parsimony tree (fewest evolutionary changes)",
          "note": "Fitch (1971) algorithm computes exact parsimony score on a fixed tree"
        },
        {
          "field_a_term": "directed acyclic graph (DAG)",
          "field_b_term": "phylogenetic network (with reticulations for HGT, hybridization)"
        },
        {
          "field_a_term": "graph distance / tree metric",
          "field_b_term": "branch length (evolutionary divergence in substitutions per site)"
        },
        {
          "field_a_term": "nearest-neighbor interchange (NNI) on trees",
          "field_b_term": "tree rearrangement move in MCMC over phylogenetic tree space"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF01734359",
          "note": "Felsenstein (1981) — Evolutionary trees from DNA sequences; maximum likelihood method; J Mol Evol 17:368"
        },
        {
          "note": "Cavalli-Sforza & Edwards (1967) — Phylogenetic analysis: models and estimation procedures; Evolution 21:550"
        },
        {
          "doi": "10.1093/sysbio/20.4.406",
          "note": "Fitch (1971) — Toward defining the course of evolution; minimum change for a specific tree topology; Syst Zool 20:406"
        },
        {
          "note": "Felsenstein (2004) — Inferring Phylogenies; Sinauer Associates"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-biology/b-graph-theory-phylogenetics.yaml"
    },
    {
      "id": "b-graph-theory-protein-networks",
      "title": "Protein-protein interaction networks are scale-free graphs (P(k) ∝ k^{-γ}, γ ≈ 2.5) whose hub proteins are essential (lethal when deleted), whose modules correspond to functional complexes detectable by the Louvain algorithm, and whose bridging proteins (high betweenness centrality) are preferential drug targets — directly translating graph-theoretic concepts into biological and pharmacological predictions.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The yeast interactome (~6,000 proteins, ~80,000 interactions, Jeong et al. 2001) follows a scale-free degree distribution P(k) ∝ k^{-γ} with γ ≈ 2.5 — identical mathematically to the WWW, citation networks, and power grids. Graph theory directly predicts biological consequences: hub proteins (high d",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-hub-lethality-protein-network-drug-targets"
      ],
      "communication_gap": "Graph theorists and network scientists publish in Physical Review Letters and Nature Physics; systems biologists publish in Molecular Systems Biology and Nature Methods. The mathematical properties of scale-free networks (proof of their fragility to hub attack, formal spectral gap bounds) are known to mathematicians but rarely applied quantitatively in drug target selection, where practitioners use heuristic centrality metrics without formal guarantees. Conversely, biologists have increasingly rich PPI datasets that could test graph-theoretic predictions about network evolution that remain unverified.\n",
      "translation_table": [
        {
          "field_a_term": "degree distribution P(k) (graph theory)",
          "field_b_term": "protein interaction degree (interactome topology)",
          "note": "Scale-free degree distribution means hubs dominate connectivity — as in many real networks"
        },
        {
          "field_a_term": "hub node (high degree)",
          "field_b_term": "essential protein (lethal when deleted)",
          "note": "Jeong et al. showed 60-70% of yeast proteins with k > 15 are essential"
        },
        {
          "field_a_term": "betweenness centrality (shortest path routing)",
          "field_b_term": "bottleneck protein / drug target enrichment",
          "note": "High betweenness proteins are in the information flow paths between modules — target-rich"
        },
        {
          "field_a_term": "network motif (over-represented subgraph)",
          "field_b_term": "regulatory circuit (feedforward loop, autoregulation)",
          "note": "Alon's motifs are graph-theoretic patterns that implement specific regulatory functions"
        },
        {
          "field_a_term": "community detection (modularity Q)",
          "field_b_term": "protein complex / functional module identification",
          "note": "Louvain algorithm recapitulates known complexes (ribosome, proteasome, spliceosome)"
        },
        {
          "field_a_term": "graph Laplacian spectrum (spectral graph theory)",
          "field_b_term": "functional module boundaries in PPI networks",
          "note": "Spectral clustering of PPI Laplacian matches biochemical complex boundaries"
        }
      ],
      "references": [
        {
          "note": "Jeong et al. (2001) — Lethality and centrality in protein networks",
          "doi": "10.1038/35075138"
        },
        {
          "note": "Alon et al. (1999) — Broad patterns of gene expression revealed by clustering analysis of tumor and normal colon tissues (motifs)",
          "doi": "10.1038/44935"
        },
        {
          "note": "Barabasi & Oltvai (2004) — Network biology: understanding the cell's functional organization",
          "doi": "10.1038/nrg1272"
        },
        {
          "note": "Blondel et al. (2008) — Fast unfolding of communities in large networks (Louvain)",
          "doi": "10.1088/1742-5468/2008/10/P10008"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-biology/b-graph-theory-protein-networks.yaml"
    },
    {
      "id": "b-information-geometry-evolutionary-fitness",
      "title": "The Fisher information matrix on the space of allele frequency distributions defines the Shahshahani Riemannian metric on population-genetic state space, making Amari's natural gradient descent in statistical learning the exact formal counterpart of Fisher's fundamental theorem — the rate of mean fitness increase equals the Fisher information about the selective environment.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The space of probability distributions over a discrete variable forms a Riemannian manifold equipped with the Fisher information metric g_{ij} = E[∂_i log p · ∂_j log p], where i,j index parameters of the distribution. Amari (1985, 1998) showed that gradient descent on this manifold — the \"natural g",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-natural-gradient-selection-reaches-fitness-optimum-faster-than-euclidean"
      ],
      "communication_gap": "Amari's information geometry literature (IEEE Transactions on Information Theory, Neural Computation) and Shahshahani's work (American Mathematical Society Memoirs) are published in completely disjoint communities. Population geneticists rarely read the machine learning or differential geometry literature. The Frank (2012) synthesis was published in the Journal of Evolutionary Biology and has not been widely adopted in either statistics or machine learning. The notation difference — statisticians use Fisher information I, biologists use additive genetic variance V_A — masks the formal identity.\n",
      "translation_table": [
        {
          "field_a_term": "Fisher information matrix g_{ij} (statistics)",
          "field_b_term": "Shahshahani metric tensor on allele frequency simplex (population genetics)",
          "note": "Formally identical; derived independently in 1925 (Fisher) and 1979 (Shahshahani)"
        },
        {
          "field_a_term": "Natural gradient ∇̃L = G^{-1} ∇L (Amari)",
          "field_b_term": "Direction of steepest fitness increase in Shahshahani geometry",
          "note": "Both follow geodesics on the Fisher information manifold"
        },
        {
          "field_a_term": "Cramer-Rao lower bound: Var(θ̂) ≥ 1/I(θ)",
          "field_b_term": "Speed limit on adaptation: rate of fitness increase ≤ Fisher information",
          "note": "Frank (2012) Price equation formulation"
        },
        {
          "field_a_term": "Kullback-Leibler divergence D_KL(p||q)",
          "field_b_term": "Fitness cost of being at allele frequencies p vs. optimal q",
          "note": "KL divergence is the squared geodesic distance on the information manifold to first order"
        },
        {
          "field_a_term": "Statistical manifold (family of distributions)",
          "field_b_term": "State space of population genetic configurations",
          "note": "The manifold of multinomial distributions on alleles"
        },
        {
          "field_a_term": "Geodesic on the manifold (e-geodesic or m-geodesic)",
          "field_b_term": "Trajectory of allele frequencies under pure selection or drift",
          "note": "Selection follows m-geodesics; drift is diffusion on the manifold"
        }
      ],
      "references": [
        {
          "doi": "10.1162/089976698300017746",
          "note": "Amari (1998) Natural gradient works efficiently in learning, Neural Comput 10:251 — introduces natural gradient as geodesic descent on statistical manifold\n"
        },
        {
          "note": "Shahshahani (1979) A new mathematical framework for the study of linkage and selection, AMS Memoir 211 — derives Fisher metric on allele frequency simplex\n"
        },
        {
          "doi": "10.1111/j.1420-9101.2012.02641.x",
          "note": "Frank (2012) Natural selection. V. How to read the fundamental equations of evolutionary change in terms of information theory, J Evol Biol 25:2377 — explicitly connects Price equation to Fisher information metric\n"
        },
        {
          "note": "Ay, Jost, Lê & Schwachhöfer (2017) Information Geometry, Springer — comprehensive treatment including evolutionary applications\n"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-biology/b-information-geometry-evolutionary-fitness.yaml"
    },
    {
      "id": "b-knot-theory-dna-topology",
      "title": "DNA in cells is topologically non-trivial — replication and transcription create catenanes and knots that must be resolved by topoisomerases — and the knot invariants (linking number, writhe, twist) of circular DNA molecules determine the thermodynamic and enzymatic cost of unknotting, making algebraic topology a quantitative tool in molecular biology.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "DNA is a long polymer, and in cells it is topologically constrained: circular DNA (plasmids, bacterial chromosomes) cannot change its topology without breaking a covalent bond. The central mathematical quantities are:\n\n  Linking number Lk = Tw + Wr (White's formula)\n\nwhere Tw (twist) = number of tim",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-dna-knot-complexity-aging"
      ],
      "communication_gap": "Knot theory developed as pure mathematics from the 1920s (Alexander) through the 1980s (Jones polynomial, Thurston geometrization). The connection to DNA was discovered by molecular biologists in the 1970s (Crick 1976, Wang 1971) who needed the concept of linking number; algebraic topology tools arrived in biology much later. Topologists and molecular biologists publish in completely different journals (J Knot Theory vs. PNAS, Mol Cell). The Jones polynomial was discovered in 1984; its relationship to DNA electrophoresis patterns was explored by biophysicists (Vologodskii) but is not widely known in the mathematical topology community.\n",
      "translation_table": [
        {
          "field_a_term": "Linking number Lk (topological invariant)",
          "field_b_term": "Total number of times one DNA strand winds around the other",
          "note": "Lk = Tw + Wr; invariant under continuous deformation; changed only by topoisomerases or strand cleavage"
        },
        {
          "field_a_term": "Writhe Wr (geometric, not topological)",
          "field_b_term": "DNA supercoiling — the coiling of the double helix axis",
          "note": "Wr < 0: negative supercoil (favoured in cells, promotes melting); Wr > 0: positive (inhibits melting)"
        },
        {
          "field_a_term": "Twist Tw (geometric)",
          "field_b_term": "Base-pair stacking angle / local helix turns",
          "note": "Tw ≈ N/10.5 for relaxed B-DNA at physiological conditions"
        },
        {
          "field_a_term": "Knot type (3₁ trefoil, 4₁ figure-eight, etc.)",
          "field_b_term": "Topological state of circular DNA after replication or recombination",
          "note": "Resolved by Type II topoisomerase; minimum crossing number = minimum number of strand-passage events needed"
        },
        {
          "field_a_term": "Alexander polynomial Δ(t)",
          "field_b_term": "Knot invariant measurable from gel electrophoresis band spacing",
          "note": "Different knot types have different migration rates; the polynomial encodes the knot type"
        },
        {
          "field_a_term": "Catenane (two linked circles)",
          "field_b_term": "Interlinked sister chromatids after DNA replication",
          "note": "Must be resolved by decatenation (Type II topoisomerase) before cell division; failure = missegregation"
        },
        {
          "field_a_term": "Topoisomerase (enzyme)",
          "field_b_term": "Topological surgery on the DNA manifold",
          "note": "Type I: ±1 Dehn surgery; Type II: ±2 Dehn surgery (strand passage); energy cost = supercoiling free energy"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.73.8.2639",
          "note": "Crick (1976) PNAS 73:2639 — Linking numbers and nucleosomes; first precise treatment of DNA topology"
        },
        {
          "note": "Bates & Maxwell (2005) DNA Topology (Oxford UP) — comprehensive textbook on DNA topology and topoisomerases",
          "url": "https://global.oup.com/academic/product/dna-topology-9780198567328"
        },
        {
          "note": "Adams (1994) The Knot Book (Freeman) — accessible introduction to knot theory with DNA applications",
          "url": "https://bookstore.ams.org/knot"
        },
        {
          "doi": "10.1016/j.plrev.2009.01.007",
          "note": "Vologodskii (2009) Phys Life Rev 6:1 — DNA topology: fundamentals and applications; knot probability theory"
        },
        {
          "doi": "10.1073/pnas.90.13.5307",
          "note": "Rybenkov et al. (1993) PNAS 90:5307 — probability of DNA knotting depends on length and ionic conditions"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/mathematics-biology/b-knot-theory-dna-topology.yaml"
    },
    {
      "id": "b-microtubule-catastrophe-dynamic-instability",
      "title": "Microtubule dynamic instability — the abrupt switch between slow growth and rapid catastrophic shrinkage — is a mathematical catastrophe in Rene Thom's sense: a bifurcation in the dynamics of GTP-cap length where the system switches discontinuously between two stable states, with the catastrophe theory unfolding predicting the dependence of switch frequency on tubulin concentration and hydrolysis rate.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Microtubules switch stochastically between polymerisation (growth, ~1 um/min) and depolymerisation (catastrophe, ~20 um/min) — a dramatic 20-fold speed difference that Mitchison & Kirschner (1984) termed dynamic instability. The GTP-cap model provides the mechanism: a layer of GTP-tubulin at the gro",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "",
      "translation_table": [
        {
          "field_a_term": "fold catastrophe",
          "field_b_term": "GTP-cap loss triggering catastrophe",
          "note": "The abrupt collapse of the microtubule tip follows the fold catastrophe topology"
        },
        {
          "field_a_term": "control parameter space (cusp unfolding)",
          "field_b_term": "tubulin concentration vs. GTP hydrolysis rate phase diagram",
          "note": "The two control parameters unfold the catastrophe; their combination sets catastrophe frequency"
        },
        {
          "field_a_term": "stable manifold (attractor)",
          "field_b_term": "growing state (GTP-cap present) or shrinking state (cap absent)",
          "note": "Two attractors separated by the catastrophe manifold"
        },
        {
          "field_a_term": "hysteresis",
          "field_b_term": "rescue is slower than catastrophe — asymmetric switching rates",
          "note": "Predicted by catastrophe theory when switching near the fold versus the cusp"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0092-8674(84)90407-5",
          "note": "Mitchison & Kirschner (1984) — dynamic instability of microtubule growth"
        },
        {
          "doi": "10.1016/j.ceb.2017.10.015",
          "note": "Bowne-Anderson et al. (2013) — microtubule dynamic instability from a local monomer-addition perspective"
        },
        {
          "doi": "10.1038/s41580-020-0262-4",
          "note": "Gudimchuk & McIntosh (2021) — probing microtubule mechanics and catastrophe"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-biology/b-microtubule-catastrophe-dynamic-instability.yaml"
    },
    {
      "id": "b-optimal-control-cancer-treatment",
      "title": "Pontryagin's maximum principle maps cancer treatment scheduling onto a Hamiltonian optimization problem — adaptive therapy exploits replicator dynamics to engineer evolutionary traps for drug-resistant clones",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Pontryagin's maximum principle (1956) provides the mathematical framework for optimal cancer treatment: minimize ∫L(x,u,t)dt subject to ẋ = f(x,u) (tumor dynamics), where x encodes tumor and immune cell counts and u(t) is the drug dose schedule. The optimality condition H(x*,u*,λ*) ≤ H(x*,u,λ*) for ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-pontryagin-adaptive-therapy-outperforms-mtd-solid-tumors"
      ],
      "communication_gap": "Mathematical optimal control theory is published in SIAM Journal on Control and Optimization and Automatica; cancer biology research appears in Cancer Research, Nature Cancer, and Cell. Evolutionary oncology (Gatenby group) bridges the gap but remains a small subfield. Clinical oncologists lack mathematical training in optimal control; control theorists lack biological intuition about cancer evolution. Most clinical trials still test MTD without considering adaptive alternatives motivated by optimal control theory.\n",
      "translation_table": [
        {
          "field_a_term": "Pontryagin Hamiltonian H(x,u,λ)",
          "field_b_term": "fitness landscape in tumor evolutionary dynamics",
          "note": "The Hamiltonian encodes the trade-off between tumor kill rate and immune cost — analogous to evolutionary fitness"
        },
        {
          "field_a_term": "bang-bang control (maximum principle)",
          "field_b_term": "MTD (maximum tolerated dose) chemotherapy scheduling",
          "note": "The mathematical bang-bang solution corresponds to clinical maximum-dose induction chemotherapy"
        },
        {
          "field_a_term": "replicator dynamics (evolutionary game theory)",
          "field_b_term": "clonal competition between drug-sensitive and resistant cells",
          "note": "Tumor evolution is formally described by replicator equations — resistance is a game-theoretic Nash equilibrium"
        },
        {
          "field_a_term": "costate variable λ(t) (adjoint state)",
          "field_b_term": "marginal value of healthy tissue / immune capacity",
          "note": "λ represents the shadow price of allowing tumor growth — the value of a unit of immune suppression"
        },
        {
          "field_a_term": "evolutionary trap (game theory)",
          "field_b_term": "adaptive therapy endpoint (oncology)",
          "note": "A game state where all mutant strategies are outcompeted — the goal of evolutionarily-informed therapy"
        }
      ],
      "references": [
        {
          "note": "Pontryagin, Boltyanskii, Gamkrelidze & Mishchenko (1962). The Mathematical Theory of Optimal Processes. Interscience."
        },
        {
          "doi": "10.1126/scitranslmed.3000248",
          "note": "Gatenby et al. (2009). Adaptive therapy. Sci Transl Med 1:28ra25."
        },
        {
          "note": "Martin & Teo (1994). Optimal Control of Drug Administration in Cancer Chemotherapy. World Scientific."
        },
        {
          "doi": "10.1039/c8ib00057c",
          "note": "Cunningham et al. (2018). Optimal control to develop therapeutic strategies for methotrexate administration. Integr Biol 10:1."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-biology/b-optimal-control-cancer-treatment.yaml"
    },
    {
      "id": "b-optimal-transport-cell-differentiation",
      "title": "Optimal transport theory (Kantorovich-Wasserstein) maps cell differentiation trajectories in gene expression space as geodesics on a Wasserstein manifold, formally identifying Waddington's epigenetic landscape with a Riemannian geometry and enabling reconstruction of developmental trajectories from single-cell RNA-seq snapshots without tracking individual cells over time.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Optimal transport (OT) seeks the minimum-cost plan to morph one probability distribution into another: W_p(μ,ν) = [inf_{γ∈Γ(μ,ν)} ∫d(x,y)^p dγ(x,y)]^(1/p). In developmental biology, a population of cells at time t₁ is a probability distribution μ over gene expression space ℝ^d (d ~ 20,000 genes), an",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-optimal-transport-waddington-landscape-riemannian-geodesic"
      ],
      "communication_gap": "Optimal transport was developed by mathematicians and economists (Kantorovich won the 1975 Nobel Prize in Economics partly for this work) and remained largely outside biology until the computational biology revolution. The single-cell genomics community developed RNA velocity and trajectory inference tools (Monocle, Palantir) with limited awareness of the Wasserstein geometry literature, and the OT mathematics community is often unaware that their theory has direct biological interpretations.\n",
      "translation_table": [
        {
          "field_a_term": "probability distribution μ over ℝ^d",
          "field_b_term": "cell population distribution over gene expression space",
          "note": "Each snapshot of single-cell RNA-seq defines an empirical measure"
        },
        {
          "field_a_term": "Wasserstein distance W₂(μ,ν)",
          "field_b_term": "developmental distance between two cell states",
          "note": "W₂ is the natural metric on the space of developmental trajectories"
        },
        {
          "field_a_term": "optimal transport plan γ* ∈ Γ(μ,ν)",
          "field_b_term": "lineage map from progenitor cells to differentiated progeny",
          "note": "γ* assigns probability mass — predicts which progenitors give rise to which cell types"
        },
        {
          "field_a_term": "geodesic in Wasserstein manifold",
          "field_b_term": "cell differentiation trajectory (optimal developmental path)",
          "note": "Waddington's \"canalization\" is non-uniqueness of geodesics in curved W₂"
        },
        {
          "field_a_term": "entropy regularization parameter ε in Sinkhorn",
          "field_b_term": "stochasticity of cell fate decisions",
          "note": "ε → 0 recovers deterministic fate; ε > 0 models transcriptional noise"
        }
      ],
      "references": [
        {
          "note": "Kantorovich (1942) On the translocation of masses. Dokl Akad Nauk 37:199 [original in Russian]"
        },
        {
          "doi": "10.1016/j.cell.2019.01.006",
          "note": "Schiebinger et al. (2019) Optimal-transport analysis of single-cell gene expression identifies developmental trajectories in reprogramming. Cell 176:928"
        },
        {
          "doi": "10.1038/s41586-018-0414-6",
          "note": "La Manno et al. (2018) RNA velocity of single cells. Nature 560:494"
        },
        {
          "note": "Villani (2009) Optimal Transport: Old and New. Springer Grundlehren vol. 338"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-biology/b-optimal-transport-cell-differentiation.yaml"
    },
    {
      "id": "b-optimal-transport-vasculature",
      "title": "Optimal transport theory ↔ biological vascular and neural network architecture (Murray's law as Wasserstein flow)",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Murray's law (1926) — that the cube of the parent vessel radius equals the sum of cubes of daughter radii at every branch point (r_0^3 = r_1^3 + r_2^3) — is the exact solution to a variational problem: minimise the total metabolic cost of maintaining and pumping blood through a branching network, su",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-allometric-rg-fixed-point"
      ],
      "communication_gap": "Murray published his law in 1926 in experimental biology journals. Optimal transport theory was developed by Kantorovich (1942) in mathematics/economics. The Wasserstein metric became central to ML and probability only in the 2010s. The connection between Murray's law and the p-Laplacian was not made explicit until Facca et al. (2021). The three communities (vascular biology, optimal transport mathematics, and network science) arrived at the same variational problem from entirely different directions across a century.\n",
      "translation_table": [
        {
          "field_a_term": "Wasserstein-1 transport cost W_1(mu, nu)",
          "field_b_term": "total metabolic cost of vascular network (pumping + maintenance)",
          "note": "Minimising W_1 on a tree gives Murray branching angles"
        },
        {
          "field_a_term": "p-Laplacian PDE with p = 3/2",
          "field_b_term": "continuous limit of Murray's branching law",
          "note": "Exact equivalence proved by Facca et al. 2021"
        },
        {
          "field_a_term": "source measure mu",
          "field_b_term": "cardiac output distribution",
          "note": "Where blood enters the network"
        },
        {
          "field_a_term": "sink measure nu",
          "field_b_term": "tissue oxygen demand distribution",
          "note": "Where blood is consumed; drives network geometry"
        },
        {
          "field_a_term": "transport plan (coupling)",
          "field_b_term": "network topology (which vessels connect which tissues)",
          "note": "Optimal coupling = optimal routing"
        },
        {
          "field_a_term": "branching exponent gamma in r_0^gamma = r_1^gamma + r_2^gamma",
          "field_b_term": "Horton-Strahler stream order exponent",
          "note": "gamma=3 (Murray, metabolic cost); gamma=2 (surface minimisation)"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.12.3.207",
          "note": "Murray (1926) PNAS — original branching law derivation"
        },
        {
          "doi": "10.1007/978-3-319-20828-2",
          "note": "Santambrogio (2015) — Optimal Transport for Applied Mathematicians"
        },
        {
          "doi": "10.1137/20M1383296",
          "note": "Facca et al. (2021) SIAM J. Appl. Math. — p-Laplacian = Murray's law"
        },
        {
          "doi": "10.1126/science.abn2727",
          "note": "Ronellenfitsch & Katifori (2016) — optimal transport in leaf venation"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/mathematics-biology/b-optimal-transport-vasculature.yaml"
    },
    {
      "id": "b-renormalization-biological-scaling",
      "title": "The renormalization group explains why biological allometric scaling laws are power laws with universal exponents — metabolic scaling, growth rates, and lifespan all emerge from the same fixed-point structure that governs critical phenomena in statistical physics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The renormalization group (RG) is the standard physics explanation for why power laws arise universally near critical points: when you \"coarse-grain\" a system (average out short-scale details), the long-scale behaviour flows to a fixed point characterised by a small set of exponents determined only ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-kleiber-wave-physics",
        "h-allometric-rg-fixed-point"
      ],
      "communication_gap": "West, Brown, and Enquist (1997) derived the 3/4 exponent and explicitly noted the fractal/self-similar structure of vascular trees, but stopped short of an RG calculation. Statistical physicists who work on RG are generally unaware that biological scaling is an open problem — they would likely classify it as a solved problem (it looks like a fixed-point calculation) without realising that the correction-to-scaling predictions have never been tested experimentally. The barrier is disciplinary: the biology journals that publish allometric scaling do not routinely receive manuscripts using Wilson-Fisher renormalization.\n",
      "translation_table": [
        {
          "field_a_term": "renormalization group fixed point",
          "field_b_term": "allometric scaling law (universal quarter-power exponents)"
        },
        {
          "field_a_term": "universality class",
          "field_b_term": "quarter-power scaling shared by all vascular taxa"
        },
        {
          "field_a_term": "relevant perturbation (changes critical exponent)",
          "field_b_term": "vascular geometry change that alters the scaling exponent (e.g. non-area-preserving branching)"
        },
        {
          "field_a_term": "irrelevant perturbation (flows to zero under RG)",
          "field_b_term": "organism-specific vascular geometry detail that changes prefactor but not exponent"
        },
        {
          "field_a_term": "correction-to-scaling term (finite-size effect)",
          "field_b_term": "deviation from Kleiber's Law in small organisms (below ~1g body mass)"
        },
        {
          "field_a_term": "coarse-graining (averaging out short-scale modes)",
          "field_b_term": "summing metabolic contributions over a vascular subtree (recursive branching)"
        },
        {
          "field_a_term": "Wilson-Fisher epsilon expansion",
          "field_b_term": "systematic perturbation theory for allometric exponents around the area-preserving branching limit"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.276.5309.122",
          "note": "West, Brown, Enquist (1997) — WBE theory of allometric scaling; derives 3/4 from area-preserving branching"
        },
        {
          "doi": "10.1038/35007655",
          "note": "West et al. (2001) — extended allometric scaling to growth rates and lifespans"
        },
        {
          "arxiv": "cond-mat/9812315",
          "note": "Wilson-Fisher RG and corrections to scaling — the theoretical toolkit needed"
        },
        {
          "arxiv": "q-bio/0612023",
          "note": "Savage et al. — deviations from Kleiber below 1g body mass; exactly the correction-to-scaling regime"
        }
      ],
      "last_reviewed": "2026-05-04",
      "file": "cross-domain/mathematics-biology/b-renormalization-biological-scaling.yaml"
    },
    {
      "id": "b-stochastic-gene-expression-noise",
      "title": "Stochastic gene expression is governed by the same master-equation noise physics that describes photon counting and radioactive decay — intrinsic shot noise (1/√N) plus extrinsic cell-to-cell variation — and bursty transcription (Fano factor > 1) enables biological bet-hedging as a mathematically optimal risk-diversification strategy.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Gene expression is a stochastic birth-death process: the two-state promoter (ON/OFF) obeys a master equation dP(n,t)/dt = k_on·P(n,OFF) - k_off·P(n,ON) + production and degradation terms. Intrinsic noise (fluctuations within a single cell from random molecular events) scales as 1/√⟨N⟩ — identical to",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-stochastic-gene-expression-bet-hedging-optimal-noise"
      ],
      "communication_gap": "Stochastic processes and master equations are standard graduate mathematics and physics curricula but rarely appear in molecular biology training. Conversely, biologists studying transcription regulation seldom frame their work in terms of Fano factors or noise decomposition. The bet-hedging—Kelly-criterion connection is essentially unknown outside mathematical biology and evolutionary theory.\n",
      "translation_table": [
        {
          "field_a_term": "master equation (probability distribution over copy number)",
          "field_b_term": "promoter switching kinetics (ON/OFF transcription)",
          "note": "The two-state promoter master equation is analytically solvable and yields a Beta-binomial compound-Poisson mRNA distribution"
        },
        {
          "field_a_term": "shot noise (Poisson, F=1)",
          "field_b_term": "constitutive transcription (single state, no bursting)",
          "note": "Unregulated promoters show near-Poisson statistics; regulated promoters show F >> 1"
        },
        {
          "field_a_term": "Fano factor F = σ²/μ",
          "field_b_term": "transcriptional burst size (b = k_on/k_off)",
          "note": "F > 1 in bursty genes such as β-globin; F ~ 1 in housekeeping genes"
        },
        {
          "field_a_term": "noise decomposition (intrinsic + extrinsic)",
          "field_b_term": "dual-reporter assay (two-colour fluorescent proteins from identical loci)",
          "note": "Elowitz et al. (2002) used two differently coloured reporters to separate intrinsic from extrinsic noise"
        },
        {
          "field_a_term": "Kelly criterion / geometric mean maximisation",
          "field_b_term": "bet-hedging via stochastic switching",
          "note": "Both strategies sacrifice arithmetic-mean performance to maximise long-run geometric growth under uncertainty"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1070919",
          "note": "Elowitz et al. (2002) Science 297:1183 — first dual-reporter separation of intrinsic and extrinsic noise in E. coli"
        },
        {
          "doi": "10.1371/journal.pbio.0040309",
          "note": "Raj et al. (2006) PLoS Biol 4:e309 — single-molecule FISH reveals transcriptional bursting; Fano factor quantification"
        },
        {
          "doi": "10.1038/nature02106",
          "note": "Paulsson (2004) Nature 427:415 — noise in genetic regulatory networks; master equation framework and noise decomposition"
        },
        {
          "doi": "10.1073/pnas.162041399",
          "note": "Swain et al. (2002) PNAS 99:12795 — intrinsic and extrinsic contributions to stochasticity in gene expression"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-biology/b-stochastic-gene-expression-noise.yaml"
    },
    {
      "id": "b-tda-protein-structure",
      "title": "Persistent homology applied to protein atomic coordinates tracks topological features (voids, tunnels, loops) across length scales via Betti numbers, providing a geometry-independent structural fingerprint that detects allosteric cavities and folding intermediates invisible to sequence analysis.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The alpha complex of a protein's atomic coordinates (each atom as a point cloud) carries topological information at all length scales simultaneously. Persistent homology tracks how topological features — β₀ (connected components), β₁ (loops/tunnels), β₂ (enclosed voids) — are born and die as the fil",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-persistent-homology-allosteric-tunnels"
      ],
      "communication_gap": "Topological data analysis was developed in pure mathematics (Edelsbrunner, Carlsson, Zomorodian) in a computational geometry context. Structural biologists trained in X-ray crystallography and cryo-EM have limited exposure to algebraic topology. The first cross-disciplinary papers (Xia & Wei 2014, Cang & Wei 2017) appeared in biomedical engineering journals rather than structural biology journals, limiting their uptake by the primary audience.\n",
      "translation_table": [
        {
          "field_a_term": "simplicial filtration (alpha complex at radius ε)",
          "field_b_term": "protein structure at a given resolution length scale",
          "note": "growing ε reveals progressively coarser structural features"
        },
        {
          "field_a_term": "β₀ (number of connected components)",
          "field_b_term": "number of disconnected structural domains at scale ε"
        },
        {
          "field_a_term": "β₁ (number of independent loops/tunnels)",
          "field_b_term": "tunnels and channels through protein structure (allosteric access paths)"
        },
        {
          "field_a_term": "β₂ (number of enclosed voids)",
          "field_b_term": "internal cavities and hydrophobic cores"
        },
        {
          "field_a_term": "persistence diagram (birth–death pairs)",
          "field_b_term": "multi-scale structural fingerprint of the protein"
        },
        {
          "field_a_term": "bottleneck distance between diagrams",
          "field_b_term": "structural dissimilarity metric independent of alignment"
        }
      ],
      "references": [
        {
          "doi": "10.1007/s00454-002-2885-2",
          "note": "Edelsbrunner et al. (2002) Topological persistence and simplification. Discrete Comput Geom 28:511"
        },
        {
          "doi": "10.1002/jnm.1974",
          "note": "Xia & Wei (2014) Persistent homology analysis of protein structure. Int J Numer Method Biomed Eng 30:814"
        },
        {
          "doi": "10.1371/journal.pcbi.1005780",
          "note": "Cang & Wei (2017) TopologyNet for protein–ligand binding affinity prediction. PLOS Comput Biol 13:e1005780"
        },
        {
          "note": "Kovacev-Nikolic et al. (2016) Using persistent homology and dynamical distances to analyze protein binding. Mol Based Math Biol 4:1"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-biology/b-tda-protein-structure.yaml"
    },
    {
      "id": "b-tensor-networks-neural-circuits",
      "title": "Tensor Networks and Neural Circuits — matrix product states, DMRG, and tensor decomposition unify quantum many-body physics, transformer attention, and synaptic weight structure",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Tensor networks (TN) are graphical representations of high-dimensional arrays in which each tensor is a node and contractions between shared indices are edges. Matrix product states (MPS) represent a 1D quantum state |ψ⟩ = Σ_{s₁…sₙ} Tr(A^{s₁}A^{s₂}…A^{sₙ})|s₁…sₙ⟩ where each A^{sᵢ} is a χ×χ matrix — ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Quantum information scientists developing TN methods (White, Vidal, Cirac) and neuroscientists studying neural population dynamics (Cunningham, Yu, Shenoy) have developed parallel low-rank decomposition methods without substantial cross-citation. The machine learning community independently rediscovered tensor methods for neural network compression. The Levine et al. (2019) paper explicitly connecting deep networks and TNs has begun bridging quantum physics and ML communities but remains largely unknown to systems neuroscientists.\n",
      "translation_table": [
        {
          "field_a_term": "matrix product state (MPS) with bond dimension χ",
          "field_b_term": "low-rank factorisation of synaptic weight tensor",
          "note": "χ controls entanglement in MPS; rank controls expressibility in neural weight decomposition — formally equivalent"
        },
        {
          "field_a_term": "DMRG variational optimisation of MPS",
          "field_b_term": "gradient descent on low-rank weight matrices in a compressed neural network",
          "note": "Both sweep iteratively through sites/layers, optimising one tensor at a time while holding others fixed"
        },
        {
          "field_a_term": "area law of entanglement entropy",
          "field_b_term": "low intrinsic dimensionality of neural population activity",
          "note": "Both justify compressed representations: physical and neural systems of interest live in a small corner of their full state space"
        },
        {
          "field_a_term": "tensor contraction (einsum along shared indices)",
          "field_b_term": "attention mechanism in transformers (Q·K^T softmax V)",
          "note": "Levine et al. 2019: transformer attention is a specific tensor network contraction; the same algebra governs quantum circuit simulation"
        },
        {
          "field_a_term": "CP decomposition (rank-1 terms Σ aᵢ ⊗ bᵢ ⊗ cᵢ)",
          "field_b_term": "factorisation of three-way neural covariance into interpretable components",
          "note": "Applied to neural calcium imaging data to extract neuron × time × trial structure"
        },
        {
          "field_a_term": "MERA (multi-scale entanglement renormalisation ansatz)",
          "field_b_term": "hierarchical compression of spatial neural data at multiple scales",
          "note": "MERA implements a discrete RG flow; analogous to hierarchical processing in sensory cortex"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.69.2863",
          "note": "White (1992) Phys Rev Lett 69:2863 — DMRG algorithm"
        },
        {
          "doi": "10.1016/j.aop.2014.06.013",
          "note": "Orus (2014) Ann Phys 349:117 — tensor network review"
        },
        {
          "doi": "10.1103/PhysRevLett.122.065301",
          "note": "Levine et al. (2019) Phys Rev Lett 122:065301 — deep learning and tensor networks"
        },
        {
          "note": "Ansuini et al. (2019) NeurIPS 32 — intrinsic dimensionality of neural representations"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-biology/b-tensor-networks-neural-circuits.yaml"
    },
    {
      "id": "b-topology-morphogenesis",
      "title": "Topological defects in condensed-matter physics — liquid crystal disclinations, magnetic vortices — are the same mathematical objects that organise physical forces during embryonic organ formation.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "In condensed-matter physics, topological defects are points or lines where the local order parameter (e.g. the director field of a liquid crystal) cannot be defined continuously, characterised by a quantised winding number or charge. These defects are topologically protected — they cannot be removed",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-topological-defect-morphogenesis"
      ],
      "communication_gap": "Developmental biologists and mathematical physicists have essentially no shared conference venues, training overlap, or literature crossover. Developmental biology is empirically driven (model organisms: Drosophila, zebrafish, C. elegans) and uses molecular genetics terminology; mathematical physics uses differential geometry and homotopy theory. The insight that actomyosin can be treated as a vector order parameter with nematic symmetry has only recently appeared in biophysics journals (Physical Biology, eLife Physics) that neither community's mainstream reads. Grant panels for developmental biology do not routinely include mathematical physicists, and vice versa.\n",
      "translation_table": [
        {
          "field_a_term": "director field (liquid crystal physics)",
          "field_b_term": "myosin-II polarisation field (developmental biology)",
          "note": "The order parameter whose topological properties determine defect structure"
        },
        {
          "field_a_term": "disclination charge / winding number",
          "field_b_term": "morphogenetic organiser strength",
          "note": "Quantised invariant characterising each defect's influence on surrounding material"
        },
        {
          "field_a_term": "topological protection",
          "field_b_term": "robustness of morphogenetic axis specification",
          "note": "Why the same body axes reliably emerge despite molecular noise"
        },
        {
          "field_a_term": "defect pair creation / annihilation",
          "field_b_term": "tissue layer separation / fusion events",
          "note": "Topological transitions that correspond to major morphogenetic events"
        },
        {
          "field_a_term": "Frank elastic constants",
          "field_b_term": "tissue mechanical anisotropy parameters",
          "note": "The material-dependent stiffness constants entering the free-energy functional"
        },
        {
          "field_a_term": "Kibble-Zurek mechanism (defect density after quench)",
          "field_b_term": "defect density after developmental symmetry breaking",
          "note": "Universal scaling law for how many defects form at a phase transition — may predict morphogenetic reproducibility"
        }
      ],
      "references": [
        {
          "arxiv": "2602.09867",
          "note": "Cell adhesion and topology at the core of morphogenesis — seeding paper"
        },
        {
          "doi": "10.1126/science.aar5663",
          "note": "Saw et al. 2017 — topological defects in epithelia trigger apoptosis (pioneer paper)"
        },
        {
          "doi": "10.1038/s41567-019-0561-9",
          "note": "Maroudas-Sacks et al. 2020 — topological defects control hydra body axis"
        }
      ],
      "last_reviewed": "2026-05-04",
      "file": "cross-domain/mathematics-biology/b-topology-morphogenesis.yaml"
    },
    {
      "id": "b-turing-reaction-diffusion",
      "title": "Turing reaction-diffusion instability ↔ biological pattern formation (digits, stripes, spots)",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Turing (1952) showed that two diffusing morphogens — a short-range activator and a long-range inhibitor — spontaneously break spatial symmetry and produce periodic patterns (stripes, spots) when the inhibitor diffuses faster than the activator (D_inhibitor / D_activator > d_c).  The predicted wavele",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-turing-zebrafish-diffusivity-ratio"
      ],
      "communication_gap": "Turing's 1952 paper was largely ignored by biologists for two decades — it appeared in a mathematics journal (Phil Trans R Soc B) and used PDEs unfamiliar to developmental biologists of the era.  The Gierer-Meinhardt (1972) reformulation reached a wider biology audience, but quantitative experimental tests were blocked by the inability to measure morphogen diffusivities in live embryos until fluorescence correlation spectroscopy matured in the 2000s.\n",
      "translation_table": [
        {
          "field_a_term": "activator diffusivity D_u",
          "field_b_term": "Nodal / Wnt signalling range",
          "note": "Short-range self-activating morphogen"
        },
        {
          "field_a_term": "inhibitor diffusivity D_v",
          "field_b_term": "Lefty / BMP signalling range",
          "note": "Long-range inhibitor; must satisfy D_v >> D_u"
        },
        {
          "field_a_term": "instability criterion d = D_v/D_u > d_c",
          "field_b_term": "condition for spontaneous pattern formation",
          "note": "d_c depends only on the linearised reaction Jacobian"
        },
        {
          "field_a_term": "characteristic wavelength Lambda*",
          "field_b_term": "stripe period / digit spacing / follicle pitch",
          "note": "Observable spatial frequency predicted from kinetics alone"
        },
        {
          "field_a_term": "stripe vs spot selection",
          "field_b_term": "species-specific skin pattern",
          "note": "Determined by domain geometry and nonlinear saturation terms"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rstb.1952.0012",
          "note": "Turing (1952) — original reaction-diffusion morphogenesis paper"
        },
        {
          "doi": "10.1126/science.1219671",
          "note": "Müller et al. (2012) — FCS measurement of Nodal/Lefty diffusivities"
        },
        {
          "doi": "10.1126/science.1252960",
          "note": "Raspopovic et al. (2014) — Wnt/BMP Turing model for mouse digits"
        },
        {
          "doi": "10.1126/science.338.6113.1476",
          "note": "Sheth et al. (2012) — wavelength scaling via Hox knockouts"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/mathematics-biology/b-turing-reaction-diffusion.yaml"
    },
    {
      "id": "b-voronoi-tessellation-cellular-architecture",
      "title": "Biological tissues self-organise into Voronoi tessellations — the same space-partitioning geometry that minimises interface energy in soap foams and maximises packing efficiency in engineered materials.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Voronoi tessellations (Dirichlet regions) partition space into cells based on nearest-neighbour distance, minimising total interface area. Biological tissues independently converge on this geometry: epithelial cell sheets, insect compound eye facets, plant epidermis, and cortical column arrangement",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-biomineralisation-voronoi-control"
      ],
      "communication_gap": "Voronoi geometry is taught in computational geometry and operations research; cell biology teaches tissue organisation without reference to the underlying geometric optimisation principle. The unification is known in biophysics but rarely crosses into mainstream cell biology teaching or practice.\n",
      "translation_table": [
        {
          "field_a_term": "Voronoi region (point set)",
          "field_b_term": "cell territory (biological tissue)",
          "note": "Each Voronoi region is the set of points closer to one generator than any other"
        },
        {
          "field_a_term": "interface energy minimisation",
          "field_b_term": "cell membrane tension minimisation",
          "note": "Physical basis for convergence to Voronoi geometry in both systems"
        },
        {
          "field_a_term": "Lewis law (topology)",
          "field_b_term": "cell area-side number relationship",
          "note": "Empirically confirmed in plant, insect, and mammalian epithelia"
        },
        {
          "field_a_term": "Aboav-Weaire law",
          "field_b_term": "neighbour cell side-number correlation",
          "note": "Statistical law linking adjacent cell topology; holds in biological and physical foams"
        }
      ],
      "references": [
        {
          "note": "Voronoi, G. (1908). Nouvelles applications des paramètres continus à la théorie des formes quadratiques. J Reine Angew Math 134:198."
        },
        {
          "doi": "10.1002/ar.1090380311",
          "note": "Lewis, F.T. (1928). The correlation between cell division and the shapes and sizes of prismatic cells in the epidermis. Anat Rec 38:341."
        },
        {
          "doi": "10.1016/0022-5193(78)90274-0",
          "note": "Honda, H. (1978). Description of cellular patterns by Dirichlet domains: the two-dimensional case. J Theor Biol 72:523."
        },
        {
          "note": "Weaire, D. & Hutzler, S. (1999). The Physics of Foams. Oxford University Press."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-biology/b-voronoi-tessellation-cellular-architecture.yaml"
    },
    {
      "id": "b-knot-theory-dna-topology",
      "title": "Knot invariants (Alexander, Jones, HOMFLY polynomials) characterize DNA knot and catenane types arising during replication and viral packaging, with topoisomerase II inhibitor chemotherapy agents exploiting the essential unknotting reaction — bridging abstract knot theory with molecular biology and pharmacology.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "DNA is a physical implementation of knot theory. Circular DNA molecules (plasmids, viral genomes, mitochondrial DNA) are closed loops that can be knotted or linked (catenated). The topological state is biologically critical: knotted DNA cannot be replicated or transcribed, and catenated daughter chr",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-topo-ii-inhibitor-transcription-coupled-dna-damage-selectivity"
      ],
      "communication_gap": "Knot theory is taught in mathematics departments (topology courses) using abstract algebraic machinery (skein relations, Seifert surfaces, homology) inaccessible to biologists. DNA topology is taught in biochemistry using physical intuition (supercoiling, topoisomerases) without formal topological language. The Jones polynomial discovery (1985) emerged from von Neumann algebra theory — quantum groups — entirely within abstract mathematics, but its DNA applications were realized primarily by Sumners and colleagues working at the mathematical biology interface. Most biochemists using gel electrophoresis to characterize DNA knots are unaware of the Jones polynomial or its potential for distinguishing biological knot states.\n",
      "translation_table": [
        {
          "field_a_term": "knot K in S³ (mathematical closed curve)",
          "field_b_term": "closed circular DNA molecule with knotted topology"
        },
        {
          "field_a_term": "Alexander polynomial Δ(t) — knot invariant from Seifert matrix",
          "field_b_term": "electrophoretic mobility pattern of DNA knot (partial fingerprint)"
        },
        {
          "field_a_term": "Jones polynomial V(t) — from Temperley-Lieb algebra / von Neumann algebras",
          "field_b_term": "complete topological fingerprint of DNA knot chirality"
        },
        {
          "field_a_term": "skein relation (crossing change relates knot polynomials)",
          "field_b_term": "topoisomerase reaction (strand passage changes topology by ±1 or ±2)"
        },
        {
          "field_a_term": "catenane (two linked but unknotted circles)",
          "field_b_term": "daughter chromosomes joined after replication (solved by topo II)"
        },
        {
          "field_a_term": "torus knot T(2,n)",
          "field_b_term": "DNA knot created by phage capsid geometry (P4 phage packaging)"
        },
        {
          "field_a_term": "writhe Wr (integral formula for supercoiling)",
          "field_b_term": "superhelical density σ (measured by intercalator titration)"
        },
        {
          "field_a_term": "unknotting number u(K) — minimum strand passages to unknot",
          "field_b_term": "minimum number of topoisomerase II reactions needed for segregation"
        }
      ],
      "references": [
        {
          "doi": "10.2307/1989828",
          "note": "Alexander (1928) Trans Am Math Soc 30:275 — topological invariants of knots and links (Alexander polynomial)"
        },
        {
          "doi": "10.1090/S0273-0979-1985-15361-3",
          "note": "Jones (1985) Bull Am Math Soc 12:103 — a polynomial invariant for knots via von Neumann algebras (Jones polynomial)"
        },
        {
          "doi": "10.1016/S0021-9258(18)89554-0",
          "note": "Dean et al. (1985) J Biol Chem 260:4975 — duplex DNA knots produced by Escherichia coli topoisomerase I"
        },
        {
          "note": "Sumners (1995) Notices Am Math Soc 42:528 — lifting the curtain: using topology to probe the hidden action of enzymes"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-chemistry/b-knot-theory-dna-topology.yaml"
    },
    {
      "id": "b-approximation-theory-deep-learning",
      "title": "Universal approximation theory establishes that neural networks with sufficient depth/width can approximate any continuous function to arbitrary precision; depth separation theorems show that deep networks require exponentially fewer neurons than shallow networks for compositional functions, grounding the empirical success of deep learning in classical Sobolev approximation theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Universal approximation theorem (Cybenko 1989, Hornik et al. 1989): a feedforward neural network with one hidden layer and sufficient neurons can approximate any continuous function on a compact domain to arbitrary precision. Depth separation theorems: exponentially more neurons required in shallow ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-depth-separation-compositional-function-approximation"
      ],
      "communication_gap": "Approximation theory is a mathematical analysis subdiscipline with its own journals (Journal of Approximation Theory) rarely read by machine learning practitioners. The universal approximation theorems are cited but the Sobolev space framework, minimax rates, and depth separation proofs are known mainly to mathematical statisticians rather than deep learning practitioners.\n",
      "translation_table": [
        {
          "field_a_term": "Kolmogorov superposition (functions of n variables = compositions of 1D functions)",
          "field_b_term": "multilayer neural network (hierarchical composition of univariate activations)",
          "note": "Kolmogorov (1957) directly inspired early neural network theory"
        },
        {
          "field_a_term": "Sobolev smoothness class W^{s,p}(Ω)",
          "field_b_term": "function class that determines required network depth/width",
          "note": "smoother functions need fewer parameters; s/d determines approximation rate"
        },
        {
          "field_a_term": "minimax optimal approximation rate n^{-s/d}",
          "field_b_term": "sample complexity bound for learning with n parameters",
          "note": "approximation error + estimation error determines generalization bound"
        },
        {
          "field_a_term": "depth separation theorem",
          "field_b_term": "empirical observation that deep networks outperform shallow ones",
          "note": "theoretical depth separation explains practical deep learning advantage"
        },
        {
          "field_a_term": "ReLU activation function (piecewise linear)",
          "field_b_term": "B-spline basis for piecewise polynomial approximation",
          "note": "ReLU networks are equivalent to adaptive piecewise polynomial approximators"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF02551274",
          "note": "Cybenko (1989) Math Control Signals Syst 2:303 — universal approximation"
        },
        {
          "doi": "10.1016/0893-6080(89)90020-8",
          "note": "Hornik et al. (1989) Neural Netw 2:359 — universal approximators"
        },
        {
          "doi": "10.1109/18.256500",
          "note": "Barron (1993) IEEE Trans Inf Theory 39:930 — approximation rates"
        },
        {
          "note": "Kolmogorov (1957) Dokl Akad Nauk SSSR 114:953 — superposition theorem"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-computer-science/b-approximation-theory-deep-learning.yaml"
    },
    {
      "id": "b-bond-percolation-x-cyber-lateral-movement",
      "title": "Bond/site percolation thresholds on graphs ↔ lateral movement probability and blast-radius growth in enterprise networks (probability ↔ cybersecurity)\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Lateral movement after initial compromise is often modeled as random or attacker-chosen hops on a graph of hosts, accounts, and trust relationships. Bond percolation (edges open with probability p) and site percolation (nodes compromised) both exhibit a sharp emergence of giant connected components ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-zero-trust-control-raises-effective-percolation-threshold"
      ],
      "communication_gap": "Security vendors rarely publish topology sufficient for percolation fits; mathematicians rarely encode MFA, conditional access, or EDR containment in edge probabilities. Shared datasets are scarce.\n",
      "translation_table": [
        {
          "field_a_term": "bond occupation probability p",
          "field_b_term": "probability an exposed trust edge is actually usable by an intruder",
          "note": "Controlled by MFA, tiering, and least privilege."
        },
        {
          "field_a_term": "giant component emergence at p_c",
          "field_b_term": "domain-wide compromise reachable from one foothold",
          "note": "Operational blast radius analog."
        },
        {
          "field_a_term": "finite-size scaling near threshold",
          "field_b_term": "probability that a red-team exercise reaches crown-jewel assets in bounded time",
          "note": "Stress-test interpretation."
        }
      ],
      "references": [
        {
          "doi": "10.1103/RevModPhys.45.574",
          "note": "Essam (1973) — percolation theory review (classical mathematical anchor)."
        },
        {
          "url": "https://csrc.nist.gov/publications/detail/sp/800-207/final",
          "note": "NIST SP 800-207 — Zero Trust Architecture (defensive segmentation framing)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-computer-science/b-bond-percolation-x-cyber-lateral-movement.yaml"
    },
    {
      "id": "b-cahn-hilliard-phase-separation-x-diffuse-interface-segmentation",
      "title": "Cahn-Hilliard phase-separation models and diffuse-interface image segmentation share an energy-minimization template: interfaces are penalized by smoothness and contrast terms while domains evolve toward separated phases or labeled regions.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The bridge is mathematical rather than material: segmentation algorithms can borrow phase-field regularization intuition, but image classes are not thermodynamic phases. The useful transfer is in interface-width, curvature, and energy-landscape diagnostics.\n",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-interface-width-regularization-predicts-segmentation-stability"
      ],
      "communication_gap": "Materials modelers focus on conservation and physical coarsening, while computer-vision practitioners tune loss terms for segmentation accuracy and speed.\n",
      "translation_table": [
        {
          "field_a_term": "order parameter concentration",
          "field_b_term": "soft region-membership function",
          "note": "Both encode smooth region identity."
        },
        {
          "field_a_term": "interfacial energy or gradient penalty",
          "field_b_term": "boundary regularization",
          "note": "Both penalize rough boundaries."
        },
        {
          "field_a_term": "spinodal coarsening",
          "field_b_term": "iterative label smoothing and region merging",
          "note": "The analogy is limited by nonconserved labels."
        }
      ],
      "references": [
        {
          "doi": "10.1063/1.1744102",
          "note": "Cahn and Hilliard (1958) free energy of nonuniform systems."
        },
        {
          "doi": "10.1109/83.902291",
          "note": "Chan and Vese (2001) active contours without edges for image segmentation."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-computer-science/b-cahn-hilliard-phase-separation-x-diffuse-interface-segmentation.yaml"
    },
    {
      "id": "b-category-theory-functional-programming",
      "title": "Category theory (Eilenberg & Mac Lane 1945) is the semantic foundation of functional programming: types are objects, functions are morphisms, functors are type constructors, monads are monoids in the category of endofunctors, and the Curry-Howard correspondence makes propositions = types and proofs = programs.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Category theory — the abstract mathematics of structure-preserving maps — is not merely analogous to functional programming; it is the precise mathematical semantics of statically-typed functional languages.\n1. Category = type system. A category C consists of objects (types in a\n   programming langu",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-univalence-axiom-proof-assistant-verification"
      ],
      "communication_gap": "Pure mathematicians (category theory) and software engineers (functional programming) have largely parallel but disconnected communities. Category theory courses rarely mention programming; most programmers learn monads from blog posts without understanding the mathematical foundations. The Haskell community and the applied category theory community (MIT, Topos Institute) are the main bridges. Industry adoption (Scala, Kotlin, Rust) is increasing functional programming use, but the categorical foundations remain opaque to most practitioners.\n",
      "translation_table": [
        {
          "field_a_term": "category object (mathematics)",
          "field_b_term": "type in a functional programming language",
          "note": "Int, String, List<A> are objects; the type system is a category"
        },
        {
          "field_a_term": "morphism f: A → B (category theory)",
          "field_b_term": "function f :: A -> B (functional programming)",
          "note": "All functions are morphisms; function composition = morphism composition"
        },
        {
          "field_a_term": "functor F: C → D (category theory)",
          "field_b_term": "Functor typeclass / type constructor (Haskell/Scala)",
          "note": "List, Maybe, IO are functors; fmap = functor action on morphisms"
        },
        {
          "field_a_term": "natural transformation η: F ⟹ G (category theory)",
          "field_b_term": "polymorphic function / parametric function (programming)",
          "note": "safeHead :: [a] -> Maybe a is a natural transformation List ⟹ Maybe"
        },
        {
          "field_a_term": "monad (monoid in endofunctors) (category theory)",
          "field_b_term": "Monad typeclass with return/bind (Haskell IO, State, Parser)",
          "note": "Monads are the category-theoretic semantics of computational effects"
        },
        {
          "field_a_term": "cartesian closed category (mathematics)",
          "field_b_term": "simply-typed lambda calculus (computer science)",
          "note": "Curry-Howard-Lambek: proofs = programs = morphisms in CCC"
        }
      ],
      "references": [
        {
          "doi": "10.2307/1990929",
          "note": "Eilenberg & Mac Lane (1945) Trans AMS 58:231 — general theory of natural equivalences"
        },
        {
          "doi": "10.1016/0890-5401(91)90052-4",
          "note": "Moggi (1991) Inf Comput 93:55 — notions of computation and monads"
        },
        {
          "url": "https://dl.acm.org/doi/10.1145/143165.143169",
          "note": "Wadler (1992) POPL — the essence of functional programming (monads)"
        },
        {
          "url": "https://link.springer.com/book/10.1007/978-1-4757-4721-8",
          "note": "Mac Lane (1971) Categories for the Working Mathematician — Springer Graduate Texts"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-computer-science/b-category-theory-functional-programming.yaml"
    },
    {
      "id": "b-complexity-proof-theory",
      "title": "The Cook-Levin theorem (1971) establishes SAT as NP-complete; Gödel's incompleteness theorems and Turing's halting problem both derive from diagonalization; the Curry-Howard correspondence identifies programs with proofs and types with propositions; interactive proof systems (IP=PSPACE) reveal that probabilistic verification is exponentially more powerful than deterministic checking — mathematics and computer science study the same logical limits from different directions.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Cook-Levin theorem (Cook 1971, Levin 1973): SAT is NP-complete — every problem in NP polynomially reduces to Boolean satisfiability. P vs NP (Clay Millennium Problem): does every efficiently verifiable problem have an efficient solution? Geometric complexity theory (Mulmuley & Sohoni 2001) attac",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-geometric-complexity-theory-p-np"
      ],
      "communication_gap": "Mathematical logicians, complexity theorists, and type theorists each developed their own language (proof theory, circuit complexity, type systems) to study the same foundational questions. Curry-Howard is widely known in programming language theory but not in logic or complexity theory. Most computer science curricula separate algorithms from logic; the deep connections (diagonalization, Kolmogorov complexity, circuit lower bounds) are graduate-level topics rarely encountered by practitioners.\n",
      "translation_table": [
        {
          "field_a_term": "Boolean satisfiability (SAT) — NP-complete under Cook-Levin",
          "field_b_term": "Hilbert's Entscheidungsproblem — undecidable under Church-Turing",
          "note": "SAT is the computational analog of the decision problem at finite resource bounds"
        },
        {
          "field_a_term": "Gödel sentence (true but unprovable in PA)",
          "field_b_term": "non-halting Turing machine (runs forever, no output)",
          "note": "both exploit self-reference; Kleene's recursion theorem unifies them formally"
        },
        {
          "field_a_term": "proposition P in intuitionistic logic",
          "field_b_term": "type T in simply-typed lambda calculus (Curry-Howard)",
          "note": "proof of P = program of type T; inhabited type = provable proposition"
        },
        {
          "field_a_term": "PSPACE (space-bounded computation)",
          "field_b_term": "IP interactive proof power (Shamir: IP = PSPACE)",
          "note": "randomness allows efficient verification of exponentially many possibilities"
        },
        {
          "field_a_term": "permanent polynomial (VNP-complete)",
          "field_b_term": "determinant polynomial (VP-complete, efficiently computable)",
          "note": "Valiant: if perm in VP then #P ⊆ poly — implies P≠NP under derandomization"
        }
      ],
      "references": [
        {
          "doi": "10.1145/800157.805047",
          "note": "Cook (1971) The complexity of theorem-proving procedures. STOC 3:151–158"
        },
        {
          "note": "Sipser (2013) Introduction to the Theory of Computation, 3rd ed; Cengage Learning"
        },
        {
          "doi": "10.1137/S0097539798347767",
          "note": "Mulmuley & Sohoni (2001) Geometric complexity theory I. SIAM J Comput 31:496–526"
        },
        {
          "doi": "10.1145/146585.146609",
          "note": "Shamir (1992) IP = PSPACE. J ACM 39:869–877"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-computer-science/b-complexity-proof-theory.yaml"
    },
    {
      "id": "b-compressed-sensing-sparse-recovery",
      "title": "Compressed sensing (Candès-Romberg-Tao, Donoho 2006) proves that k-sparse signals in ℝⁿ can be exactly recovered from m = O(k log n/k) random linear measurements via ℓ₁ minimisation — far fewer than the n measurements required by the Shannon-Nyquist theorem — creating a mathematical foundation for sub-Nyquist sampling that has revolutionised MRI, radar, and high-dimensional statistics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Shannon-Nyquist sampling theorem states that a band-limited signal must be sampled at twice the highest frequency to allow perfect reconstruction. For a signal with n degrees of freedom, n measurements are required. Compressed sensing (CS) breaks this barrier for sparse signals.\nCandès, Romberg ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-compressed-sensing-mri-10x-scan-time-reduction-clinical-safety"
      ],
      "communication_gap": "Compressed sensing was developed by applied mathematicians and signal processing engineers. Statisticians developed LASSO independently for regression problems. The formal equivalence (LASSO = basis pursuit under different noise models) was recognised within a few years, but the communities still publish in separate venues (IEEE Transactions on Information Theory / Applied and Computational Harmonic Analysis vs. Annals of Statistics / Journal of the Royal Statistical Society). Medical imaging adopted CS aggressively (FDA approved compressed sensing MRI in 2017), while clinical communities were unaware of the underlying mathematical theory.\n",
      "translation_table": [
        {
          "field_a_term": "k-sparse signal (at most k nonzeros out of n) (mathematics)",
          "field_b_term": "Parsimonious model: few active features in a high-dimensional dataset",
          "note": "Sparsity is the mathematical formalisation of Occam's razor"
        },
        {
          "field_a_term": "Restricted Isometry Property (RIP) of measurement matrix A",
          "field_b_term": "Near-orthogonality of regressor columns in statistics (low coherence)",
          "note": "RIP is the condition guaranteeing LASSO recovers sparse solution exactly"
        },
        {
          "field_a_term": "ℓ₁ minimisation (LASSO, basis pursuit)",
          "field_b_term": "Sparse variable selection in high-dimensional regression",
          "note": "Formally identical; compressed sensing provides exact recovery guarantees for LASSO"
        },
        {
          "field_a_term": "Random Gaussian measurement matrix A (compressed sensing)",
          "field_b_term": "Random projection / dimensionality reduction (Johnson-Lindenstrauss)",
          "note": "Both rely on random matrices satisfying RIP/JL with high probability"
        },
        {
          "field_a_term": "Shannon-Nyquist rate (n measurements for n-dimensional signal)",
          "field_b_term": "Classical statistics: need n observations to estimate n parameters",
          "note": "CS breaks the Nyquist barrier; high-dimensional statistics needs p << n regularisation"
        },
        {
          "field_a_term": "Sparsity in wavelet domain (MRI image)",
          "field_b_term": "Feature sparsity in biological signals (few active genes, sparse neural codes)",
          "note": "Natural images are wavelet-sparse; many biological signals are similarly sparse"
        }
      ],
      "references": [
        {
          "doi": "10.1109/TIT.2006.871582",
          "note": "Candès, Romberg & Tao (2006) Robust uncertainty principles: exact signal recovery from highly incomplete frequency information, IEEE Trans Inf Theory 52:489 — one of the founding CS papers\n"
        },
        {
          "doi": "10.1109/TIT.2006.885507",
          "note": "Donoho (2006) Compressed sensing, IEEE Trans Inf Theory 52:1289 — second founding paper; establishes ℓ₁ recovery and RIP framework\n"
        },
        {
          "doi": "10.1111/j.2517-6161.1996.tb02080.x",
          "note": "Tibshirani (1996) Regression shrinkage and selection via the LASSO, J R Stat Soc B 58:267 — introduces LASSO for sparse regression\n"
        },
        {
          "doi": "10.1002/mrm.21391",
          "note": "Lustig, Donoho & Pauly (2007) Sparse MRI: the application of compressed sensing for rapid MR imaging, Magn Reson Med 58:1182 — CS applied to MRI\n"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-computer-science/b-compressed-sensing-sparse-recovery.yaml"
    },
    {
      "id": "b-convolution-x-convolutional-neural-nets",
      "title": "Discrete convolution — diagonalized by the discrete Fourier transform via the convolution theorem — is the algebraic backbone of convolutional neural networks’ local translation-equivariant layers.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The convolution theorem states that convolution becomes pointwise multiplication in the Fourier domain (with appropriate boundary conditions). CNNs implement spatial convolution with learned kernels, enforcing locality and weight sharing that realize a shift-equivariant linear operator. In infinite ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-cnn-layers-approximate-localized-spectral-filters"
      ],
      "communication_gap": "ML courses emphasize backprop; signal-processing courses emphasize FFT convolution. Practitioners overlap, but pedagogy rarely states the convolution theorem when teaching stride and dilation.",
      "translation_table": [
        {
          "field_a_term": "circular convolution / FFT",
          "field_b_term": "fast correlation filters in some architectures"
        },
        {
          "field_a_term": "transfer function of FIR filter",
          "field_b_term": "effective frequency response of a learned kernel"
        },
        {
          "field_a_term": "windowing / edge effects",
          "field_b_term": "boundary artifacts in padded convolutions"
        }
      ],
      "references": [
        {
          "doi": "10.1109/5.58337",
          "note": "LeCun et al. (1998) — gradient-based learning applied to document recognition (CNN foundations)"
        },
        {
          "doi": "10.1109/MSP.2010.938019",
          "note": "Mallat (1989/retrospective accessible) wavelet/scattering perspectives on hierarchical filtering"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-computer-science/b-convolution-x-convolutional-neural-nets.yaml"
    },
    {
      "id": "b-cryptography-number-theory",
      "title": "Modern cryptography is applied number theory: RSA security rests on the hardness of integer factorization, elliptic curve cryptography on the discrete logarithm problem over finite fields, and post-quantum cryptography on the shortest vector problem in integer lattices — each translating a mathematical hardness assumption into a practical security guarantee.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "RSA (Rivest, Shamir, Adleman 1978): public key e, private key d, modulus n = pq (product of two large primes). Key relationship: ed ≡ 1 (mod φ(n)) where φ(n) = (p-1)(q-1) is Euler's totient function. Encryption: c = m^e (mod n); decryption: m = c^d (mod n). Security relies on the hardness of integer",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-kyber-lwe-parameter-quantum-security-margin"
      ],
      "communication_gap": "Number theory was considered the purest of pure mathematics — G.H. Hardy famously celebrated its uselessness. The RSA paper (1978) imported Euler's totient theorem (1763) directly into practical communication security 215 years after its statement. Elliptic curves were studied by Fermat, Euler, and Abel for intrinsic mathematical interest; their application to cryptography (Miller 1985, Koblitz 1987) came 200 years later. NIST PQC standardization required number theorists, computer scientists, and security engineers to collaborate — communities that rarely share publications, conferences, or even mathematical vocabulary.\n",
      "translation_table": [
        {
          "field_a_term": "Euler's totient function φ(n) = |(Z/nZ)*|",
          "field_b_term": "RSA key space size / group order in RSA",
          "note": "the group structure of (Z/nZ)* under multiplication is the foundation of RSA correctness"
        },
        {
          "field_a_term": "discrete logarithm problem (DLP) in Z_p*",
          "field_b_term": "Diffie-Hellman key exchange security assumption",
          "note": "DH security requires DLP hardness; Pohlig-Hellman reduces DLP to prime-order subgroups"
        },
        {
          "field_a_term": "elliptic curve group law (chord-and-tangent, point at infinity)",
          "field_b_term": "ECC cryptographic operations (scalar multiplication Q = kP)",
          "note": "group structure enables public key cryptography with smaller key sizes than RSA"
        },
        {
          "field_a_term": "lattice (integer linear combinations of basis vectors in R^n)",
          "field_b_term": "LWE / NTRU post-quantum cryptographic keys",
          "note": "hardness of shortest vector problem (SVP) in high-dimensional lattices is the PQC security foundation"
        },
        {
          "field_a_term": "quantum Fourier transform (QFT over Z_N)",
          "field_b_term": "Shor's period-finding algorithm (breaks RSA and DLP)",
          "note": "QFT computes DFT over Z_N in O(log² N) quantum gates vs. O(N log N) classical FFT"
        }
      ],
      "references": [
        {
          "doi": "10.1145/359340.359342",
          "note": "Rivest, Shamir & Adleman (1978) A method for obtaining digital signatures and public-key cryptosystems; Commun ACM 21:120"
        },
        {
          "doi": "10.1016/S0022-0000(85)80036-X",
          "note": "Miller (1985) Use of elliptic curves in cryptography; J Comput Syst Sci 31:469"
        },
        {
          "doi": "10.1109/SFCS.1994.365700",
          "note": "Shor (1994) Algorithms for quantum computation — discrete logarithms and factoring; FOCS 35:124"
        },
        {
          "note": "NIST (2022) PQC Standardization Round 3 Finalists and Selected Algorithms — NIST IR 8413"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-computer-science/b-cryptography-number-theory.yaml"
    },
    {
      "id": "b-curry-howard-proofs-programs",
      "title": "The Curry-Howard correspondence proves that propositions in intuitionistic logic are identical to types in typed lambda calculus, and proofs of those propositions are identical to programs of those types — mathematics and computation are the same formal system viewed from two perspectives.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Curry-Howard isomorphism (independently discovered by Haskell Curry in 1934 for combinatory logic and William Howard in 1969 for natural deduction) establishes an exact correspondence between the syntax of formal logic and the syntax of typed functional programming:\nLOGICAL SIDE             ↔   ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-homotopy-type-theory-univalence-computation"
      ],
      "communication_gap": "The Curry-Howard isomorphism is well-known in programming language theory and type theory, where it has driven decades of research on proof assistants. However, it is almost entirely absent from standard mathematics education, where logic and computation are taught separately. Working mathematicians using proof assistants often do not conceptualise what they are doing as programming; conversely, most software engineers have never heard of the correspondence that underlies the type systems they use daily. The categorical completion (Curry-Howard-Lambek) is understood mainly in theoretical computer science and category theory communities.\n",
      "translation_table": [
        {
          "field_a_term": "Proposition P in intuitionistic logic",
          "field_b_term": "Type P in typed lambda calculus",
          "note": "Propositions and types are the same syntactic category; the distinction is only conceptual"
        },
        {
          "field_a_term": "Proof of proposition P",
          "field_b_term": "Term (program) of type P",
          "note": "A proof is a program; writing a proof is writing a certified program"
        },
        {
          "field_a_term": "Logical implication A → B",
          "field_b_term": "Function type A → B",
          "note": "Modus ponens = function application; introducing → = lambda abstraction"
        },
        {
          "field_a_term": "Conjunction A ∧ B",
          "field_b_term": "Product type (pair type) A × B",
          "note": "Proving A and B = constructing a pair (a, b); projections = ∧-elimination"
        },
        {
          "field_a_term": "Disjunction A ∨ B",
          "field_b_term": "Sum type (tagged union) A + B",
          "note": "Proving A or B = injecting into the left or right component; case analysis = ∨-elimination"
        },
        {
          "field_a_term": "Cut rule in sequent calculus",
          "field_b_term": "Let-binding / function application",
          "note": "Cut elimination = β-reduction; eliminating a lemma = inlining a function call"
        },
        {
          "field_a_term": "Proof normalisation (cut-free proof)",
          "field_b_term": "β-normal form (fully reduced program)",
          "note": "Church-Rosser theorem for λ-calculus = confluence of proof normalisation"
        },
        {
          "field_a_term": "Law of excluded middle (P ∨ ¬P)",
          "field_b_term": "Call/cc (call-with-current-continuation)",
          "note": "Classical logic corresponds to languages with first-class control flow"
        }
      ],
      "references": [
        {
          "note": "Howard (1980) in Seldin & Hindley (eds.) To H.B. Curry: Essays on Combinatory Logic — formulae-as-types correspondence (original 1969 manuscript)",
          "url": "https://www.cs.cmu.edu/~crary/819-f09/Howard80.pdf"
        },
        {
          "doi": "10.1145/2699407",
          "note": "Wadler (2015) Comm ACM 58:75 — Propositions as types; accessible survey of the correspondence"
        },
        {
          "note": "Martin-Löf (1984) Intuitionistic Type Theory — dependent types extending Curry-Howard to full mathematics",
          "url": "https://archive-pml.github.io/martin-lof/pdfs/Bibliopolis-Book-retypeset-1984.pdf"
        },
        {
          "note": "Curry & Feys (1958) Combinatory Logic Vol I — original combinatory logic; implicit Curry-Howard for combinators",
          "url": "https://doi.org/10.2307/2964755"
        },
        {
          "arxiv": "1308.0514",
          "note": "Homotopy Type Theory Book (2013) — univalence axiom extending Curry-Howard to homotopy theory"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/mathematics-computer-science/b-curry-howard-proofs-programs.yaml"
    },
    {
      "id": "b-elastic-net-map-x-laplace-gaussian-composite-prior",
      "title": "Elastic net regularization can be read as MAP estimation under a composite sparsity-and-shrinkage prior: the L1 term behaves like a Laplace prior, while the L2 term behaves like a Gaussian prior that stabilizes correlated predictors.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The bridge makes the frequentist penalty/Bayesian prior equivalence explicit for model selection under correlated designs. It is useful for calibrating regularization paths, but posterior uncertainty requires a full probabilistic model rather than only the MAP optimum.\n",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-mixing-parameter-matches-posterior-sparsity-stability-curves"
      ],
      "communication_gap": "ML implementations present alpha and l1_ratio as optimizer knobs, while Bayesian statisticians discuss prior scale and posterior sensitivity.\n",
      "translation_table": [
        {
          "field_a_term": "L1 penalty",
          "field_b_term": "Laplace sparsity prior",
          "note": "MAP equivalence for sparsity."
        },
        {
          "field_a_term": "L2 penalty",
          "field_b_term": "Gaussian shrinkage prior",
          "note": "MAP equivalence for ridge stabilization."
        },
        {
          "field_a_term": "grouping effect for correlated predictors",
          "field_b_term": "prior-induced sharing of coefficient mass",
          "note": "Interpretability depends on calibration."
        }
      ],
      "references": [
        {
          "doi": "10.1111/j.1467-9868.2005.00503.x",
          "note": "Zou and Hastie (2005) elastic net."
        },
        {
          "doi": "10.1198/016214508000000337",
          "note": "Park and Casella Bayesian lasso context for L1 as Laplace prior."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-computer-science/b-elastic-net-map-x-laplace-gaussian-composite-prior.yaml"
    },
    {
      "id": "b-elliptic-curve-complex-torus-x-finite-field-crypto-pedagogy",
      "title": "Elliptic curves over ℂ form complex tori (compact genus-one Riemann surfaces) where the group law comes from analytic geometry — modern ECC uses curves over finite fields where points form finite Abelian groups with no literal torus topology; pedagogy often introduces the complex picture first for intuition, then warns that cryptographic security lives in discrete logarithms on 𝔽_q-rational points.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The chord-and-tangent group law is uniform across fields — explaining why textbooks illustrate ℂ/Λ pictorially — but security proofs and side-channel engineering operate on Galois cohomology, embedding degrees, and curve orders over finite fields — **the torus mental model must not be mistaken for t",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-sequence-complex-torus-first-ecc-exam-performance"
      ],
      "communication_gap": "Algebraic geometry curricula visualize complex tori while applied cryptography courses jump straight to NIST curves — newcomers may over-identify torus geometry with protocol safety unless instructors foreground finite-field discrete logarithm hardness.\n",
      "translation_table": [
        {
          "field_a_term": "complex elliptic curve E(ℂ) ≅ ℂ/Λ as a torus",
          "field_b_term": "cyclic subgroups of E(𝔽_q) used in ECDH / ECDSA implementations",
          "note": "Same abstract group law definition; different topological pictures and arithmetic degrees."
        },
        {
          "field_a_term": "fundamental parallelogram periodicity",
          "field_b_term": "modular reduction of scalar multiplication k·P over 𝔽_q",
          "note": "Periodicity intuition motivates wrap-around diagrams only metaphorically for finite fields."
        },
        {
          "field_a_term": "analytic uniformization (Weierstrass ℘)",
          "field_b_term": "explicit formulas for point addition in projective coordinates on chip",
          "note": "Engineers implement rational formulas; analysts use expansions — complementary views."
        }
      ],
      "references": [
        {
          "doi": "10.1007/978-1-4612-0441-1",
          "note": "Silverman (1986/2009) — Arithmetic of Elliptic Curves (complex uniformization + general theory)"
        },
        {
          "doi": "10.1007/978-1-4939-1711-7",
          "note": "Washington — Elliptic Curves: Number Theory and Cryptography (finite-field protocols)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-computer-science/b-elliptic-curve-complex-torus-x-finite-field-crypto-pedagogy.yaml"
    },
    {
      "id": "b-gnn-weisfeiler-lehman-isomorphism",
      "title": "Graph neural networks are computationally equivalent to the Weisfeiler-Lehman graph isomorphism test, linking the expressive power of GNN architectures to a classical combinatorial algorithm from 1968.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Message-passing graph neural networks (MPGNNs) are at most as powerful as the 1-Weisfeiler-Lehman (1-WL) color refinement algorithm: two graphs that 1-WL cannot distinguish will be assigned identical embeddings by any MPGNN, with equality achieved by the GIN architecture using injective aggregation ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-higher-order-gnn-practical-expressiveness"
      ],
      "communication_gap": "Machine learning researchers develop GNN architectures empirically while combinatorialists study WL algorithms as abstract complexity tools; the Xu et al. (2019) theoretical connection appeared 50 years after the WL algorithm and is still not widely taught in either ML or combinatorics curricula.\n",
      "translation_table": [
        {
          "field_a_term": "node feature aggregation in GNN (machine learning)",
          "field_b_term": "color refinement step in WL algorithm (combinatorics)",
          "note": "Both compute new node labels by collecting multisets of neighbor labels"
        },
        {
          "field_a_term": "readout function / graph embedding (machine learning)",
          "field_b_term": "canonical graph certificate from WL color histogram (combinatorics)",
          "note": "The final graph representation is the histogram of stable WL colors"
        },
        {
          "field_a_term": "graph isomorphism network GIN (machine learning)",
          "field_b_term": "injective multiset hash achieving 1-WL power (combinatorics)",
          "note": "GIN with sum aggregation and MLP achieves the maximum 1-WL expressiveness"
        },
        {
          "field_a_term": "higher-order GNNs / k-GNNs (machine learning)",
          "field_b_term": "k-dimensional WL hierarchy (combinatorics)",
          "note": "k-WL corresponds to distinguishing graphs via k-tuples rather than individual nodes"
        }
      ],
      "references": [
        {
          "doi": "10.48550/arXiv.1810.00826",
          "note": "Xu et al. (2019) - How Powerful are Graph Neural Networks? WL equivalence proof"
        },
        {
          "doi": "10.48550/arXiv.1901.09342",
          "note": "Maron et al. (2019) - invariant and equivariant GNNs and higher-order WL"
        },
        {
          "doi": "10.1109/SFCS.1992.267779",
          "note": "Cai, Furer & Immerman (1992) - optimal lower bounds for WL graph isomorphism"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-computer-science/b-gnn-weisfeiler-lehman-isomorphism.yaml"
    },
    {
      "id": "b-hyperbolic-geometry-x-network-embedding",
      "title": "Hyperbolic geometry provides exponentially more room in a ball of fixed radius than Euclidean space, making it a natural host geometry for embeddings of trees and scale-free hierarchical networks.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Trees embed with low distortion in hyperbolic space because distances grow like logs of branching depth, matching the volume growth of hyperbolic balls. Poincaré and Lorentz models therefore yield compact embeddings for hierarchical similarity structure that would require high-dimensional Euclidean ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-real-hierarchies-embed-better-in-hyperbolic-space"
      ],
      "communication_gap": "Differential geometers discuss curvature abstractly, while ML papers often treat hyperbolic models as engineering tricks without stating identifiability limits for empirical networks.",
      "translation_table": [
        {
          "field_a_term": "negative curvature κ < 0",
          "field_b_term": "trainable embedding space curvature"
        },
        {
          "field_a_term": "hyperbolic distance",
          "field_b_term": "graph reconstruction loss in embedding space"
        },
        {
          "field_a_term": "exponential volume growth",
          "field_b_term": "capacity for many leaves at similar graph depth"
        }
      ],
      "references": [
        {
          "arxiv": "1705.08039",
          "note": "Nickel & Kiela (2017) — Poincaré embeddings for hierarchical representations"
        },
        {
          "arxiv": "1806.03417",
          "note": "Nickel & Kiela (2018) — Lorentz / hyperboloid model for hierarchical embeddings"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-computer-science/b-hyperbolic-geometry-x-network-embedding.yaml"
    },
    {
      "id": "b-information-geometry-machine-learning",
      "title": "Information geometry (Amari) equips the space of probability distributions with a Riemannian metric via the Fisher information matrix, enabling natural gradient descent invariant to reparametrisation in machine learning",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Information geometry (Amari 1985) applies differential geometry to the statistical manifold — the space of probability distributions parametrised by θ. The Fisher information matrix g_ij(θ) = E[(∂log p/∂θ_i)(∂log p/∂θ_j)] defines a Riemannian metric (the unique metric invariant under sufficient stat",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-natural-gradient-fisher-rao-metric-optimal-deep-learning-convergence"
      ],
      "communication_gap": "Differential geometers and machine learning engineers rarely interact. Information geometry developed primarily in the statistics literature (Amari's group in Japan) and is underutilised in mainstream machine learning despite its direct relevance. The computational cost of inverting the Fisher matrix long discouraged adoption; K-FAC and related approximations have only recently bridged this gap.\n",
      "translation_table": [
        {
          "field_a_term": "Fisher information matrix g_ij",
          "field_b_term": "Riemannian metric on parameter space",
          "note": "Same object — the metric that makes the statistical manifold into a Riemannian space"
        },
        {
          "field_a_term": "KL divergence D_KL(p||q)",
          "field_b_term": "squared geodesic distance (to second order)",
          "note": "KL divergence is not a metric but approximates squared geodesic distance near p=q"
        },
        {
          "field_a_term": "natural gradient F⁻¹∇L",
          "field_b_term": "Riemannian gradient (covariant derivative)",
          "note": "Natural gradient is the steepest ascent direction in the Riemannian metric"
        },
        {
          "field_a_term": "exponential family / mixture family (dual foliations)",
          "field_b_term": "e-flat and m-flat submanifolds",
          "note": "Dual geometric structures enable exact information projection algorithms"
        }
      ],
      "references": [
        {
          "note": "Amari (1985) Differential-Geometrical Methods in Statistics — foundational text"
        },
        {
          "note": "Amari (1998) — natural gradient works efficiently in learning",
          "doi": "10.1162/089976698300017746"
        },
        {
          "note": "Martens (2014) New Insights and Perspectives on the Natural Gradient Method — arXiv:1412.1193 (published JMLR 2020)"
        },
        {
          "note": "Nielsen (2020) — an elementary introduction to information geometry",
          "doi": "10.3390/e22101100"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-computer-science/b-information-geometry-machine-learning.yaml"
    },
    {
      "id": "b-linear-algebra-deep-learning",
      "title": "Deep neural networks are compositions of linear maps (weight matrices) and nonlinear activations whose training dynamics are governed, in the infinite-width limit, by the Neural Tangent Kernel — reducing deep learning to kernel regression and connecting it to spectral linear algebra, Jacobian conditioning, and random matrix theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A deep neural network f(x) = σ(W_L · σ(W_{L-1} · ... · σ(W_1 x))) is architecturally a composition of linear maps (weight matrices Wᵢ ∈ ℝ^{n×m}) and pointwise nonlinearities. Backpropagation computes ∂L/∂Wᵢ via the chain rule, which at each layer involves the Jacobian J_{ij} = ∂yᵢ/∂xⱼ — a matrix who",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ntk-deep-learning-kernel-regression"
      ],
      "communication_gap": "Linear algebra and random matrix theory are taught in mathematics departments that rarely interact with machine learning groups. The NTK paper (Jacot 2018) appeared at NeurIPS but its implications for kernel methods and functional analysis are still being absorbed by the broader linear algebra community. Many ML practitioners use linear algebra tools (SVD, eigendecomposition) without awareness of the deeper random matrix theory results that characterize typical random weights — and many mathematicians who work on random matrices are unaware of the practical training diagnostics this theory enables.\n",
      "translation_table": [
        {
          "field_a_term": "weight matrix W ∈ ℝ^{m×n}",
          "field_b_term": "linear transformation in the network layer",
          "note": "The fundamental building block — a matrix multiplication followed by a nonlinearity"
        },
        {
          "field_a_term": "Jacobian J = ∂y/∂x",
          "field_b_term": "gradient propagation operator (per-layer)",
          "note": "Conditioning of J determines gradient flow; poorly conditioned J → vanishing/exploding gradients"
        },
        {
          "field_a_term": "singular value decomposition (SVD)",
          "field_b_term": "layer-wise learning dynamics in linear networks",
          "note": "Saxe et al. showed linear networks learn via sequential SVD mode acquisition"
        },
        {
          "field_a_term": "kernel function k(x,x')",
          "field_b_term": "Neural Tangent Kernel governing infinite-width training",
          "note": "NTK converts gradient descent on a deep network into kernel regression"
        },
        {
          "field_a_term": "Marchenko-Pastur distribution",
          "field_b_term": "random matrix null hypothesis for weight matrix spectra",
          "note": "Deviations from MP law indicate layers have learned structured information"
        },
        {
          "field_a_term": "condition number κ(W) = σ_max/σ_min",
          "field_b_term": "gradient stability metric for network layers",
          "note": "Batch norm, layer norm, and spectral norm all reduce κ to stabilize training"
        }
      ],
      "references": [
        {
          "note": "Rumelhart, Hinton & Williams (1986) — backpropagation and the chain rule",
          "doi": "10.1038/323533a0"
        },
        {
          "note": "Jacot, Gabriel & Hongler (2018) — Neural Tangent Kernel",
          "doi": "10.48550/arXiv.1806.07572"
        },
        {
          "note": "Saxe, McClelland & Ganguli (2014) — exact learning dynamics in linear networks",
          "doi": "10.48550/arXiv.1312.6120"
        },
        {
          "note": "Martin & Mahoney (2021) — heavy-tailed self-regularization in deep learning",
          "doi": "10.5555/3454287.3454403"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-computer-science/b-linear-algebra-deep-learning.yaml"
    },
    {
      "id": "b-ransac-robust-estimation-x-astronomical-source-matching",
      "title": "RANSAC-style robust estimation and astronomical source matching share an outlier-dominated geometry problem: infer a transformation or correspondence from sparse inliers while cosmic rays, blends, artifacts, and catalog mismatches act as structured outliers.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The bridge is methodological. Astronomical cross-matching can use robust geometric-estimation ideas, but sky-survey outliers are not uniformly random, so standard RANSAC sampling assumptions require domain-specific modification.\n",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-quality-ranked-ransac-improves-astrometric-crossmatch-precision"
      ],
      "communication_gap": "Computer vision emphasizes generic geometric models, while astronomy pipelines encode survey-specific priors and quality flags.\n",
      "translation_table": [
        {
          "field_a_term": "minimal sample consensus set",
          "field_b_term": "small set of trusted matched stars or sources",
          "note": "Both seed a candidate transform."
        },
        {
          "field_a_term": "inlier threshold",
          "field_b_term": "astrometric residual tolerance",
          "note": "Threshold choice controls precision and recall."
        },
        {
          "field_a_term": "structured outliers",
          "field_b_term": "blends, artifacts, cosmic rays, proper-motion mismatches",
          "note": "Survey outliers violate uniform assumptions."
        }
      ],
      "references": [
        {
          "doi": "10.1145/358669.358692",
          "note": "Fischler and Bolles (1981) RANSAC."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-computer-science/b-ransac-robust-estimation-x-astronomical-source-matching.yaml"
    },
    {
      "id": "b-stone-weierstrass-x-universal-approximation-intuition",
      "title": "Stone-Weierstrass approximation and neural-network universal approximation theorems share a compact-set density intuition: rich function classes approximate continuous targets arbitrarily well, but the analogy must be separated from learnability, sample complexity, and optimization claims.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The bridge is pedagogical and formal at the level of density theorems: both results say an expressive algebra or network family can approximate continuous functions on compact domains. It does not imply that gradient descent will find the approximant or that finite data identify it.\n",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-compact-algebra-first-sequence-improves-uap-transfer"
      ],
      "communication_gap": "Mathematicians teach density theorems abstractly, while ML curricula often present universal approximation without topology and compactness caveats.\n",
      "translation_table": [
        {
          "field_a_term": "separating algebra of continuous functions",
          "field_b_term": "neural network class with non-polynomial activation",
          "note": "Both are richness conditions."
        },
        {
          "field_a_term": "uniform approximation on compact sets",
          "field_b_term": "universal approximation on bounded input domains",
          "note": "Compact-domain caveats matter."
        },
        {
          "field_a_term": "existence theorem",
          "field_b_term": "not a training or generalization guarantee",
          "note": "The bridge must not be overstated."
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF02551274",
          "note": "Cybenko (1989) universal approximation theorem for sigmoidal functions."
        },
        {
          "doi": "10.1016/0893-6080(89)90020-8",
          "note": "Hornik, Stinchcombe and White (1989) multilayer feedforward networks as universal approximators."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-computer-science/b-stone-weierstrass-x-universal-approximation-intuition.yaml"
    },
    {
      "id": "b-type-theory-functional-programming",
      "title": "The Curry-Howard correspondence identifies types in programming languages with propositions in logic and programs with proofs — making proof assistants (Coq, Lean) and systems languages (Rust borrow checker) instances of applied type theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Curry-Howard isomorphism (Curry 1934 combinatory logic; Howard 1969 natural deduction) establishes: types ↔ propositions; programs ↔ proofs; program execution ↔ proof normalization; function types A→B ↔ implication A⊃B; product types A×B ↔ conjunction A∧B; sum types A+B ↔ disjunction A∨B. The si",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-dependent-types-industrial-systems-programming-feasibility"
      ],
      "communication_gap": "Type theory researchers (in mathematics and PL theory) develop expressive type systems and prove their properties; systems programmers who use Rust's borrow checker daily are largely unaware that it instantiates affine type theory from linear logic (Girard 1987). The HoTT program is known in algebraic topology and type theory communities but is rarely discussed in mainstream CS curricula.\n",
      "translation_table": [
        {
          "field_a_term": "proposition in logic (e.g., A∧B)",
          "field_b_term": "type in programming language (e.g., A×B product type)",
          "note": "the fundamental Curry-Howard identification"
        },
        {
          "field_a_term": "proof of proposition A",
          "field_b_term": "program of type A (an inhabitant of type A)",
          "note": "proving a theorem = writing a type-correct program"
        },
        {
          "field_a_term": "intuitionistic propositional logic",
          "field_b_term": "simply typed lambda calculus",
          "note": "exact correspondence; classical logic corresponds to continuation-passing style"
        },
        {
          "field_a_term": "dependent type (type depends on a value)",
          "field_b_term": "type-theoretic encoding of universal/existential quantifiers",
          "note": "allows Vec(n) (vector of length n) to be a type — lengths checked at compile time"
        },
        {
          "field_a_term": "proof normalization (cut elimination)",
          "field_b_term": "program evaluation / beta reduction",
          "note": "running a program corresponds to simplifying (normalizing) a proof"
        },
        {
          "field_a_term": "affine type (used at most once)",
          "field_b_term": "Rust ownership/borrow checker (no aliasing of mutable references)",
          "note": "eliminates memory safety bugs via type system; no runtime overhead"
        }
      ],
      "references": [
        {
          "note": "Curry & Feys (1958) Combinatory Logic Vol. I; North-Holland"
        },
        {
          "note": "Martin-Löf (1975) An intuitionistic theory of types; in Many-Sorted Logic and Its Applications"
        },
        {
          "note": "Gonthier (2008) Formal proof — the four-color theorem; Notices Am Math Soc 55:1382"
        },
        {
          "note": "The Univalent Foundations Program (2013) Homotopy Type Theory; Institute for Advanced Study"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-computer-science/b-type-theory-functional-programming.yaml"
    },
    {
      "id": "b-wasserstein-gan-gradient-penalty-x-kantorovich-lipschitz-stability",
      "title": "Wasserstein GAN training constrains the critic to approximate a 1-Lipschitz dual potential via gradient penalties or spectral normalization — reframing practical stability as enforcing convex-analytic regularity conditions inherited from Kantorovich optimal transport duality, beyond the coarse statement “WGAN uses Earth mover’s distance.”\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Kantorovich duality expresses W₁ as a supremum over 1-Lipschitz test functions; empirical WGAN critics approximate this supremum with neural nets, and gradient-penalty variants (Gulrajani et al.) directly penalize dual-feasibility violations (‖∇f‖) along interpolation segments — training stability l",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-gradient-penalty-magnitude-tracks-dual-feasibility-proxy-metrics"
      ],
      "communication_gap": "OT textbooks emphasize Kantorovich duality proofs while ML tutorials jump to PyTorch snippets — practitioners sometimes overlook that gradient penalties explicitly encode dual feasibility inspired by OT Lip constraints rather than arbitrary heuristics alone.\n",
      "translation_table": [
        {
          "field_a_term": "Dual feasibility ‖f‖_Lip ≤ 1 in Kantorovich problem",
          "field_b_term": "spectral norm bounds / gradient penalty on discriminator",
          "note": "Practical enforcement is approximate; equality with continuous OT holds only in ideal limits."
        },
        {
          "field_a_term": "entropic regularization of OT (Sinkhorn, ε > 0)",
          "field_b_term": "stochastic smoothing or noise in GAN critics / augmentation pipelines",
          "note": "Regularization families differ; analogy targets stabilization motifs broadly."
        },
        {
          "field_a_term": "convexity / concavity structure of dual objective",
          "field_b_term": "convex-concave saddle optimization algorithms used to train critic-generator pairs",
          "note": "Optimization viewpoint aligns partially with OT algorithms yet architecture matters enormously."
        }
      ],
      "references": [
        {
          "doi": "10.48550/arXiv.1704.00028",
          "note": "Gulrajani et al. (2017) — improved WGAN training using gradient penalty (ICML)"
        },
        {
          "doi": "10.48550/arXiv.1701.07875",
          "note": "Arjovsky, Chintala & Bottou (2017) — Wasserstein GAN; Kantorovich dual framing that motivates Lipschitz critics"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-computer-science/b-wasserstein-gan-gradient-penalty-x-kantorovich-lipschitz-stability.yaml"
    },
    {
      "id": "b-optimal-foraging-calculus-variations",
      "title": "Charnov's marginal value theorem — the optimal forager leaves a patch when instantaneous gain rate equals the habitat average — is derived from the calculus of variations (Lagrangian optimisation), making patch exploitation mathematically identical to optimal stopping problems in finance and drug dosing interval optimisation.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Marginal value theorem (Charnov 1976): an optimal forager should leave a patch when the instantaneous rate of energy gain f'(t) equals the average rate for the habitat E*:\n\n  f'(t*) = E* = E[g(t)] / (T + t*)\n\nwhere t* = optimal residence time, T = mean travel time between patches, g(t) = gain functi",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-marginal-value-theorem-stochastic-extension"
      ],
      "communication_gap": "Ecologists learn MVT as a graphical theorem in behavioural ecology without deriving it from calculus of variations. Applied mathematicians working on optimal stopping do not know that MVT is an instance of their problem. The formalisation in McNamara & Houston (1986) bridges this but remains in specialist literature.\n",
      "translation_table": [
        {
          "field_a_term": "patch gain function g(t) (ecology)",
          "field_b_term": "objective functional in calculus of variations",
          "note": "Concavity of g(t) is the diminishing returns assumption required for a unique optimum"
        },
        {
          "field_a_term": "marginal gain f'(t) = dg/dt",
          "field_b_term": "Euler-Lagrange gradient condition",
          "note": "Optimality requires first-order stationarity of the variational problem"
        },
        {
          "field_a_term": "habitat average E* (optimal foraging)",
          "field_b_term": "Lagrange multiplier lambda (constrained optimisation)",
          "note": "E* is the shadow price of time — exactly the Lagrangian multiplier in the variational formulation"
        },
        {
          "field_a_term": "patch residence time t* (ecology)",
          "field_b_term": "optimal stopping time (stochastic control)",
          "note": "Both problems seek a threshold policy where action changes when marginal value hits threshold"
        },
        {
          "field_a_term": "travel time T between patches",
          "field_b_term": "transaction cost / switching cost (economics)",
          "note": "High travel time = high switching cost; lowers optimal frequency of patch switching / portfolio rebalancing"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0040-5809(76)90007-5",
          "note": "Charnov (1976) Theor Popul Biol 9:129 — original marginal value theorem"
        },
        {
          "note": "MacArthur & Pianka (1966) Am Nat 100:603 — optimal diet theory precursor"
        },
        {
          "note": "Stephens & Krebs (1986) Foraging Theory, Princeton UP — comprehensive treatment"
        },
        {
          "note": "McNamara & Houston (1986) J Theor Biol 122:281 — dynamic programming extension"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-ecology/b-optimal-foraging-calculus-variations.yaml"
    },
    {
      "id": "b-optimal-foraging-x-explore-exploit",
      "title": "Charnov’s marginal value theorem for patch leaving under depletion parallels explore–exploit tradeoffs in sequential decision problems and bandit algorithms.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Optimal foraging theory predicts a forager leaves a patch when the marginal capture rate equals the long-run average intake rate achievable in the habitat — a stopping rule derived from renewal arguments. Multi-armed bandit policies and Gittins indices formalize a structurally similar problem: alloc",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-charnov-marginal-value-maps-to-index-policy-budgeting"
      ],
      "communication_gap": "Ecologists publish MVT as diagrams; computer scientists publish bandits with different notation. Cross-training reviews exist but empirical ethology rarely reports quantities comparable to indices.",
      "translation_table": [
        {
          "field_a_term": "patch residence time",
          "field_b_term": "number of pulls on an arm / option"
        },
        {
          "field_a_term": "depletion curve",
          "field_b_term": "expected reward schedule with risk"
        },
        {
          "field_a_term": "travel time / switching cost",
          "field_b_term": "switching penalty in bandits"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0040-5809(76)90007-5",
          "note": "Charnov (1976) — marginal value theorem"
        },
        {
          "doi": "10.1023/A:1013689704352",
          "note": "Auer, Cesa-Bianchi & Fischer (2002) — UCB finite-time analysis (canonical bandit bridge)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-ecology/b-optimal-foraging-x-explore-exploit.yaml"
    },
    {
      "id": "b-perron-frobenius-population-dynamics",
      "title": "The Perron-Frobenius theorem guarantees that the Leslie matrix (age-structured population model) has a unique positive dominant eigenvalue λ₁ = asymptotic growth rate, with the stable age distribution as its eigenvector; sensitivity analysis of λ₁ to matrix entries guides conservation biology priorities.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Perron-Frobenius theorem (Perron 1907, Frobenius 1912) states: for any non-negative irreducible matrix A, there exists a unique dominant eigenvalue λ₁ > 0 (the Perron root) such that: - λ₁ > |λᵢ| for all other eigenvalues λᵢ - The corresponding eigenvectors v (right) and w (left) have all positi",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-elasticity-analysis-conservation-prioritisation"
      ],
      "communication_gap": "The Perron-Frobenius theorem is taught in linear algebra and functional analysis courses without any mention of ecology; Leslie matrix models are taught in ecology without presenting the underlying linear algebra theorem that guarantees their properties. Caswell's (2001) monograph is the canonical bridge, but it is rarely read by pure mathematicians. The generalisations of Perron-Frobenius (to Hilbert spaces, to multi-type branching processes) have only partially been applied to age-structured population models.\n",
      "translation_table": [
        {
          "field_a_term": "Perron root λ₁ (dominant eigenvalue)",
          "field_b_term": "asymptotic population growth rate λ = e^r",
          "note": "λ₁ > 1 means population grows; λ₁ < 1 means decline. Same as intrinsic rate of increase r."
        },
        {
          "field_a_term": "dominant right eigenvector v",
          "field_b_term": "stable age distribution (proportion in each age class)",
          "note": "Regardless of initial age distribution, population converges to this stable structure"
        },
        {
          "field_a_term": "dominant left eigenvector w",
          "field_b_term": "reproductive value by age class",
          "note": "Reproductive value = expected future offspring; maximised in prime reproductive age classes"
        },
        {
          "field_a_term": "non-negative irreducible matrix",
          "field_b_term": "Leslie matrix with non-zero fecundity in at least one class",
          "note": "Irreducibility = every age class eventually contributes to all others (biologically realistic condition)"
        },
        {
          "field_a_term": "sensitivity ∂λ₁/∂aᵢⱼ",
          "field_b_term": "conservation priority of life history stage ij",
          "note": "High sensitivity entries indicate which demographic rates most affect population viability"
        }
      ],
      "references": [
        {
          "note": "Perron, O. (1907). Zur Theorie der Matrices. Math. Ann. 64:248–263. -- Original Perron-Frobenius theorem for positive matrices"
        },
        {
          "note": "Leslie, P.H. (1945). On the use of matrices in certain population mathematics. Biometrika 33:183–212. -- Original Leslie matrix formulation for age-structured population dynamics"
        },
        {
          "note": "Caswell (2001) Matrix Population Models (2nd ed.) Sinauer -- sensitivity/elasticity analysis"
        },
        {
          "doi": "10.2307/1939611",
          "note": "de Kroon, H. et al. (1986). Elasticity: The relative contribution of demographic parameters to population growth rate. Ecology 67:1427–1431."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-ecology/b-perron-frobenius-population-dynamics.yaml"
    },
    {
      "id": "b-convex-optimization-economic-equilibrium",
      "title": "Arrow-Debreu general equilibrium existence (via Kakutani's fixed point theorem) is equivalent to solving a convex optimization problem — KKT conditions are conditions for economic optimality with resource constraints",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Arrow-Debreu general equilibrium theorem (1954) proves that under convexity of preferences and production sets, a competitive equilibrium exists and is Pareto optimal (first welfare theorem). The proof uses Kakutani's fixed point theorem applied to excess demand correspondences — the same mathem",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-mechanism-design-convex-duality-revenue-optimality"
      ],
      "communication_gap": "Economists rarely use algorithmic optimisation language; computer scientists working on equilibrium computation rarely connect to Arrow-Debreu theory. The mathematical equivalence between competitive equilibrium and convex programming was established by Samuelson and Dorfman et al. but the bridge to modern convex optimisation algorithms (Boyd & Vandenberghe) is underexploited in economic computation.\n",
      "translation_table": [
        {
          "field_a_term": "convex feasible set",
          "field_b_term": "convex consumption/production set",
          "note": "Convexity of economic sets is the regularity condition ensuring existence"
        },
        {
          "field_a_term": "Lagrange multiplier λ_i",
          "field_b_term": "shadow price / market-clearing price",
          "note": "Multipliers have direct economic interpretation as resource scarcity prices"
        },
        {
          "field_a_term": "KKT complementary slackness g_i(x*)λ_i = 0",
          "field_b_term": "market-clearing condition (no excess supply at positive price)",
          "note": "Zero price for non-scarce goods; positive price only for binding constraints"
        },
        {
          "field_a_term": "fixed point of best-response correspondence",
          "field_b_term": "competitive equilibrium / Nash equilibrium",
          "note": "Kakutani's theorem proves existence in both economic and game-theoretic settings"
        }
      ],
      "references": [
        {
          "note": "Arrow & Debreu (1954) — existence of an equilibrium for a competitive economy",
          "doi": "10.2307/1907353"
        },
        {
          "note": "Kakutani (1941) — fixed-point theorem for correspondences",
          "doi": "10.1215/S0012-7094-41-00838-4"
        },
        {
          "note": "Boyd & Vandenberghe (2004) Convex Optimization — standard reference"
        },
        {
          "note": "Mas-Colell, Whinston & Green (1995) Microeconomic Theory — KKT conditions and welfare"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-economics/b-convex-optimization-economic-equilibrium.yaml"
    },
    {
      "id": "b-information-economics-mechanism",
      "title": "Myerson's revelation principle (1979) shows incentive-compatible direct revelation mechanisms are without loss of generality; VCG achieves dominant- strategy incentive compatibility with efficiency; the Mirrlees optimal income tax model (Nobel 1996) shows the top marginal rate should be zero; the Crémer-McLean theorem enables full surplus extraction — mechanism design is reverse game theory unifying information economics, public finance, and social choice theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Mechanism design (Hurwicz 1973, Myerson, Maskin, Nobel 2007) is the engineering of game rules to achieve desired social outcomes in the presence of private information. The revelation principle (Myerson 1979): for any Bayesian equilibrium of any mechanism, there exists a direct revelation mechanism ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-revelation-principle-ai-alignment-mechanism"
      ],
      "communication_gap": "Mechanism design is primarily an economics subdiscipline (mathematical economics, auction theory) with its own journals. Computer scientists independently developed algorithmic mechanism design (Nisan-Ronen 1999) with computational efficiency constraints. AI researchers are now applying mechanism design to multi-agent systems and alignment without always being aware of the 50-year-old economics literature. Social choice theory (Arrow, Gibbard-Satterthwaite) and mechanism design overlap substantially but are often taught separately.\n",
      "translation_table": [
        {
          "field_a_term": "direct revelation mechanism (players report types)",
          "field_b_term": "incentive-compatible tax-transfer system (agents reveal income)",
          "note": "both reduce the mechanism design problem to a simpler canonical form via the revelation principle"
        },
        {
          "field_a_term": "dominant-strategy incentive compatibility (DSIC)",
          "field_b_term": "second-price auction (Vickrey) dominant strategy to bid truthfully",
          "note": "VCG generalizes Vickrey auction to multi-good, multi-agent settings"
        },
        {
          "field_a_term": "Bayesian incentive compatibility (BIC)",
          "field_b_term": "Nash equilibrium of a Bayesian game (interim expected payoffs)",
          "note": "BIC is weaker than DSIC; used in Mirrlees model and Myerson optimal auctions"
        },
        {
          "field_a_term": "VCG payment (residual social value externality)",
          "field_b_term": "Pigouvian tax correcting an externality",
          "note": "VCG is the decentralized analog of the Pigouvian internalization for information externalities"
        },
        {
          "field_a_term": "Mirrlees top marginal rate = 0 (no distortion at top)",
          "field_b_term": "no-distortion-at-the-top result in screening models",
          "note": "arises in all principal-agent screening models; top type receives no rent reduction"
        }
      ],
      "references": [
        {
          "doi": "10.1287/moor.4.1.61",
          "note": "Myerson (1979) Incentive compatibility and the bargaining problem. Math Oper Res 4:61–73"
        },
        {
          "doi": "10.2307/2296779",
          "note": "Mirrlees (1971) An exploration in the theory of optimum income taxation. Rev Econ Stud 38:175–208"
        },
        {
          "doi": "10.2307/1911324",
          "note": "Crémer & McLean (1988) Full extraction of the surplus in Bayesian and dominant strategy auctions. Econometrica 56:345–361"
        },
        {
          "note": "Hurwicz (1973) The design of mechanisms for resource allocation. Am Econ Rev 63:1–30"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-economics/b-information-economics-mechanism.yaml"
    },
    {
      "id": "b-optimal-stopping-secretary-problem",
      "title": "The optimal stopping secretary problem — stop searching when you have seen the best so far after sampling 1/e of candidates — is a universal decision rule for search under uncertainty that bridges pure mathematics (measure theory, Wald's equation) with cognitive science (how humans search for mates, jobs, and apartments) and provides a normative benchmark for bounded rational decision making.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The secretary problem asks: given N applicants arriving sequentially, each must be accepted or rejected immediately; how do you maximise the probability of selecting the best? The optimal strategy — observe the first N/e candidates without accepting, then accept the first who exceeds all previous — ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "",
      "translation_table": [
        {
          "field_a_term": "optimal stopping threshold (N/e)",
          "field_b_term": "37% rule in human search behavior",
          "note": "Observe 37% of options before selecting the first one better than all observed"
        },
        {
          "field_a_term": "dynamic programming (Bellman equation)",
          "field_b_term": "optimal reservation wage in job search theory",
          "note": "The value function recursion gives the optimal stopping rule for any sequential choice"
        },
        {
          "field_a_term": "Wald's identity (expected sample size)",
          "field_b_term": "optimal search length in mate choice or housing search",
          "note": "Predicts how long an optimal searcher should sample before committing"
        },
        {
          "field_a_term": "loss aversion (prospect theory deviation)",
          "field_b_term": "early stopping bias (humans stop before 37%)",
          "note": "The gap between 1/e and human stopping points is well-described by loss aversion"
        }
      ],
      "references": [
        {
          "doi": "10.1093/biomet/48.3-4.339",
          "note": "Lindley (1961) — dynamic programming and decision theory; derives the 1/e threshold"
        },
        {
          "doi": "10.1080/00029890.1966.11970960",
          "note": "Ferguson (1966) — who solved the secretary problem?"
        },
        {
          "doi": "10.1080/01621459.1994.10476795",
          "note": "Seale & Rapoport (1997) — sequential decision making with relative ranks"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-economics/b-optimal-stopping-secretary-problem.yaml"
    },
    {
      "id": "b-preference-elicitation-x-vickrey-auction",
      "title": "Dominant-strategy truthful mechanisms such as the Vickrey auction and VCG payments connect preference elicitation in economics to algorithmic mechanism design in computer science.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "In a second-price sealed-bid auction, truthful bidding is a weakly dominant strategy: bidders should bid their values. Vickrey–Clarke–Groves mechanisms generalize this idea to allocate discrete goods and charge externality-based payments so truth-telling remains dominant under quasi-linear utility. ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-vickrey-clarke-groves-payments-improve-lab-truthful-reporting"
      ],
      "communication_gap": "Economics experiments emphasize human behavioral deviations; CS theory assumes rational agents. Bridging requires empirical models of bounded rationality in mechanism design.",
      "translation_table": [
        {
          "field_a_term": "second-price payment rule",
          "field_b_term": "VCG externality pricing on reported types"
        },
        {
          "field_a_term": "dominant strategy incentive compatibility",
          "field_b_term": "truthful preference reporting in elicitation tasks"
        },
        {
          "field_a_term": "revenue equivalence (relaxations)",
          "field_b_term": "tradeoffs when budgets or behavioral deviations matter"
        }
      ],
      "references": [
        {
          "doi": "10.1111/j.1540-6261.1961.tb02789.x",
          "note": "Vickrey (1961) — counterspeculation and second-price auctions"
        },
        {
          "doi": "10.1145/1381989.1382947",
          "note": "Nisan & Ronen (1999) — algorithmic mechanism design (CS bridge)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-economics/b-preference-elicitation-x-vickrey-auction.yaml"
    },
    {
      "id": "b-bode-sensitivity-integral-x-waterbed-effect",
      "title": "Bode’s sensitivity integral for minimum-phase plants ↔ the “waterbed effect” tradeoff in LQG/H-infinity robust control (classical control ↔ robust control theory)\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "For stable single-input single-output linear time-invariant systems that are minimum phase, Bode’s sensitivity integral forces integral of log|S(jω)| over frequency to equal zero when using standard weighting conventions—improving disturbance rejection in one band necessarily worsens it elsewhere, t",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-minimum-phase-plants-attain-tighter-bode-bounds"
      ],
      "communication_gap": "Classical Bode lectures emphasize graphical Nyquist/Bode plots; modern robust control emphasizes LMIs and synthesis software. Students may miss that the integral constraints are the same physical content.\n",
      "translation_table": [
        {
          "field_a_term": "sensitivity S = 1/(1+PC)",
          "field_b_term": "closed-loop transfer from disturbance to output in standard feedback diagram",
          "note": "Same object, different notation across textbooks."
        },
        {
          "field_a_term": "Bode integral of log|S|",
          "field_b_term": "waterbed constraint on achievable sensitivity templates",
          "note": "Minimum-phase case gives the starkest zero-sum picture."
        },
        {
          "field_a_term": "complementary sensitivity T = PC/(1+PC)",
          "field_b_term": "noise amplification / robustness to multiplicative uncertainty",
          "note": "Classic S+T=1 algebraic coupling."
        }
      ],
      "references": [
        {
          "doi": "10.1109/TAC.1978.1101643",
          "note": "Doyle (1978) — guaranteed margins for LQG regulators; classic waterbed framing."
        },
        {
          "doi": "10.1109/9.570020",
          "note": "Freudenberg & Looze (1985) — right half-plane zeros and design tradeoffs (IEEE Trans. Automat. Control)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-engineering/b-bode-sensitivity-integral-x-waterbed-effect.yaml"
    },
    {
      "id": "b-koopman-operator-x-data-driven-dmd",
      "title": "Koopman (linear evolution on observables) ↔ dynamic mode decomposition and extended DMD for nonlinear flows (operator theory ↔ data-driven fluid mechanics)\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Koopman operator advances observables linearly even when state dynamics are nonlinear. Dynamic mode decomposition approximates Koopman eigenfunctions and eigenvalues from trajectory data, yielding spatial modes that oscillate and decay with single frequencies—useful for reduced-order modeling, c",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-koopman-linear-dynamics-capture-coherent-structures-limited-window"
      ],
      "communication_gap": "Functional analysis training differs from fluids labs’ data pipelines; convergence theory for EDMD on turbulent data is still unevenly communicated to practitioners.\n",
      "translation_table": [
        {
          "field_a_term": "Koopman eigenfunction phi(x)",
          "field_b_term": "DMD mode shape extracted from snapshots",
          "note": "Approximate equality under mild conditions and enough data."
        },
        {
          "field_a_term": "Koopman eigenvalue mu",
          "field_b_term": "DMD eigenvalue lambda = log(mu)/dt in discrete time",
          "note": "Links growth/decay/oscillation rates."
        },
        {
          "field_a_term": "spectral expansion of observables",
          "field_b_term": "rank-r truncated DMD reconstruction",
          "note": "Truncation controls model error."
        }
      ],
      "references": [
        {
          "doi": "10.1017/S0022112010001217",
          "note": "Schmid (2010) — dynamic mode decomposition of fluid flows (J. Fluid Mech.)."
        },
        {
          "doi": "10.1146/annurev-control-062918-053510",
          "note": "Mezić (2019) — Koopman operator methods in control and dynamical systems (Annu. Rev. Control Robot. Auton. Syst.)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-engineering/b-koopman-operator-x-data-driven-dmd.yaml"
    },
    {
      "id": "b-lyapunov-stability-nonlinear-control",
      "title": "Lyapunov's stability theory (1892) provides the mathematical framework unifying nonlinear control engineering, passivity-based design, and automated stability verification via sum-of-squares semidefinite programming.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Lyapunov stability (1892) characterises stability of ẋ = f(x) through existence of a Lyapunov function V(x) > 0 with V̇(x) ≤ 0. Finding such functions is the central challenge in nonlinear control. The bridge to engineering is threefold: (1) classical Bode/Routh-Hurwitz results are the linear specia",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-sos-lyapunov-global-nonpolynomial"
      ],
      "communication_gap": "Pure mathematicians studying dynamical systems and control engineers share the Lyapunov vocabulary in principle but operate in separate journals (SIAM Journal on Control, IEEE TAC, Automatica vs. Journal of Differential Equations). The SOS connection to semidefinite programming emerged from optimization researchers (Parrilo's MIT thesis) rather than either community. Nonlinear control engineers often use Lyapunov proofs as formal rituals rather than as guides to design; mathematicians often prove existence theorems without addressing computability.\n",
      "translation_table": [
        {
          "field_a_term": "Lyapunov function V(x) > 0, V̇ ≤ 0",
          "field_b_term": "control Lyapunov function / energy-like certificate",
          "note": "Engineers seek V that doubles as a storage function for passivity-based design"
        },
        {
          "field_a_term": "eigenvalues of linearization Df(x*) all Re(λ) < 0",
          "field_b_term": "Routh-Hurwitz stability criterion / gain and phase margin (Bode)",
          "note": "Linear engineering criteria are the infinitesimal / frequency-domain faces of Lyapunov"
        },
        {
          "field_a_term": "invariance principle (LaSalle)",
          "field_b_term": "convergence despite V̇ = 0 on invariant set — important for adaptive control",
          "note": "LaSalle's extension used routinely in adaptive and output-feedback control proofs"
        },
        {
          "field_a_term": "sum-of-squares decomposition p(x) = σᵢ²(x)",
          "field_b_term": "SOS semidefinite program searching polynomial Lyapunov certificates",
          "note": "Converts a non-convex existence question to a convex SDP — the core algorithmic bridge"
        },
        {
          "field_a_term": "storage function / dissipation inequality",
          "field_b_term": "passivity-based controller — interconnected passive subsystems are stable",
          "note": "Compositional stability guarantee that enables modular control design"
        }
      ],
      "references": [
        {
          "note": "Lyapunov (1892) The General Problem of the Stability of Motion, transl. Fuller (1992), Int J Control 55:531"
        },
        {
          "note": "Khalil (2002) Nonlinear Systems, 3rd ed., Prentice Hall — standard graduate text"
        },
        {
          "note": "Bode (1945) Network Analysis and Feedback Amplifier Design, Van Nostrand"
        },
        {
          "doi": "10.1109/CDC.2004.1428908",
          "note": "Prajna & Papachristodoulou (2004) — Analysis of switched and hybrid systems using SOS, CDC 2004"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-engineering/b-lyapunov-stability-nonlinear-control.yaml"
    },
    {
      "id": "b-optimization-theory-machine-learning",
      "title": "Convex optimization theory (KKT conditions, strong duality, convergence rates for gradient descent) provides the mathematical foundation for machine learning training, while empirical ML discoveries — the dominance of saddle points over local minima in high dimensions and the lottery ticket hypothesis — require extending classical theory beyond convexity.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Convex optimization: minimize f(x) subject to x in C (convex set). The Lagrangian L(x,lambda,mu) = f(x) + lambda^T h(x) + mu^T g(x) and dual function g(lambda,mu) = inf_x L satisfy strong duality (primal = dual) under Slater's condition. KKT conditions are necessary and sufficient: stationarity, pri",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-lottery-ticket-sparse-subnetwork-universality"
      ],
      "communication_gap": "Convex optimization was developed by operations research and applied mathematics communities (Boyd and Vandenberghe textbook published 2004) largely before deep learning became dominant. Deep learning practitioners rarely use classical optimization convergence proofs in practice. The theoretical ML community has tried to bridge this gap but key questions (why does SGD generalize well? what is the correct convergence notion for non-convex neural networks?) remain open, keeping the communities somewhat separated.\n",
      "translation_table": [
        {
          "field_a_term": "KKT stationarity condition",
          "field_b_term": "zero-gradient condition at neural network training convergence",
          "note": "KKT is necessary at any local minimum; sufficient only for convex problems"
        },
        {
          "field_a_term": "condition number kappa = L/mu",
          "field_b_term": "learning rate sensitivity — large kappa requires small learning rate for stability",
          "note": "Adam mitigates large kappa by adapting per-parameter learning rates"
        },
        {
          "field_a_term": "Lagrange multiplier lambda",
          "field_b_term": "regularization coefficient (L2 regularization = Lagrange constraint on norm)",
          "note": "L2 regularization is equivalent to adding a quadratic constraint to the primal"
        },
        {
          "field_a_term": "saddle point (indefinite Hessian)",
          "field_b_term": "dominant critical point type in high-dimensional neural network loss landscape",
          "note": "Dauphin (2014): fraction of negative Hessian eigenvalues tracks loss value"
        },
        {
          "field_a_term": "L1 regularization (sparse solution)",
          "field_b_term": "winning ticket subnetwork (sparse trained-in-isolation subnetwork)",
          "note": "Lottery ticket hypothesis has connections to compressed sensing and L1-induced sparsity"
        }
      ],
      "references": [
        {
          "note": "Boyd & Vandenberghe (2004) Convex Optimization. Cambridge University Press"
        },
        {
          "note": "Kingma & Ba (2015) ICLR — Adam optimizer"
        },
        {
          "note": "Dauphin et al. (2014) NeurIPS 27 — saddle points in high-dimensional optimization"
        },
        {
          "note": "Frankle & Carlin (2019) ICLR — lottery ticket hypothesis"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-engineering/b-optimization-theory-machine-learning.yaml"
    },
    {
      "id": "b-origami-mathematics-computational-fold",
      "title": "Origami design is a computational geometry problem: any polyhedral surface can be folded from a flat sheet (Demaine-Tachi's universal fold theorem), and the fold sequence is computable using Lang's TreeMaker algorithm, which solves a constrained optimization problem mapping a tree graph (crease pattern skeleton) to a circle packing on a square, bridging combinatorial geometry and engineering design",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Lang's TreeMaker algorithm formalizes origami design: a model's silhouette is described as a stick figure (tree graph) with branch lengths; TreeMaker finds a circle/ellipse packing on the square paper satisfying: (1) no circles overlap, (2) circle radii equal required flap lengths, (3) path constrai",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Origami artists design intuitively through trial and error while mathematicians have developed a complete computational theory of foldable surfaces; engineering applications (deployable space structures, medical stents, automotive airbags) require the mathematical fold design algorithms but rarely train engineers in the origami mathematics framework.",
      "translation_table": [
        {
          "field_a_term": "origami crease pattern (engineering)",
          "field_b_term": "planar graph with mountain/valley labeled edges satisfying flat-foldability conditions (mathematics)",
          "note": "A crease pattern is flat-foldable iff Maekawa's theorem (|M-V| = 2) and Kawasaki's theorem (alternating angle sum = π) hold at every vertex"
        },
        {
          "field_a_term": "origami model silhouette / appendages (engineering)",
          "field_b_term": "tree graph with edge-length labeled branches (mathematics)",
          "note": "TreeMaker: tree graph nodes = flap tips; branch lengths = desired flap lengths; algorithm optimizes circle packing"
        },
        {
          "field_a_term": "rigid foldability of polyhedral origami (engineering)",
          "field_b_term": "overconstrained mechanism with zero degree of freedom vs finite mechanism (mathematics)",
          "note": "Rigid origami folds without distorting panels; analyzed as a linkage mechanism with rotational DOF at creases"
        },
        {
          "field_a_term": "deployable structures inspired by origami (engineering)",
          "field_b_term": "configuration space path from flat to folded state in geometric constraint satisfaction (mathematics)",
          "note": "Miura fold, solar sail, stent deployment use origami kinematics; configuration path avoids self-intersection"
        }
      ],
      "references": [
        {
          "doi": "10.1007/978-1-4612-4092-4",
          "note": "Lang (1996) - TreeMaker algorithm for origami base design from tree graphs"
        },
        {
          "doi": "10.1145/2461912.2462004",
          "note": "Demaine & Tachi (2013) - origamizer: practical 3D origami construction"
        },
        {
          "doi": "10.1073/pnas.1805762115",
          "note": "Stern et al. (2018) - complexity of folding self-folding origami"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-engineering/b-origami-mathematics-computational-fold.yaml"
    },
    {
      "id": "b-queuing-theory-service-systems",
      "title": "Queuing Theory and Service Systems — Erlang's M/M/c model, Little's law, and Kingman's approximation govern wait times in hospitals, networks, and manufacturing",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Queuing theory analyses systems where arriving customers wait for service. The canonical M/M/1 queue (Poisson arrivals at rate λ, exponential service times with rate μ) requires utilisation ρ = λ/μ < 1 for stability. The mean number in the system is L = ρ/(1−ρ), which diverges as ρ → 1 — the non-lin",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Queuing theory is mathematically sophisticated but its core insights — especially the nonlinearity of wait time near saturation and the pooling principle — are widely misapplied in operations management and healthcare. Hospital administrators routinely plan for 95%+ bed occupancy without recognising that queuing theory predicts catastrophic waiting above ~85% utilisation. The Erlang B/C formulas are standard in telecommunications but rarely taught in medical or public administration programmes.\n",
      "translation_table": [
        {
          "field_a_term": "utilisation ρ = λ/μ (traffic intensity)",
          "field_b_term": "fraction of time servers are busy",
          "note": "ρ must be strictly less than 1 for the queue to be stable; ρ > 1 means infinite queue growth"
        },
        {
          "field_a_term": "mean queue length L = ρ/(1−ρ) in M/M/1",
          "field_b_term": "average number of patients/customers waiting",
          "note": "Nonlinear: L doubles when ρ goes from 0.5 to 0.67; diverges as ρ → 1"
        },
        {
          "field_a_term": "Little's law L = λW",
          "field_b_term": "flow balance: mean inventory = throughput × mean lead time",
          "note": "Used in manufacturing (WIP = production rate × cycle time) and network engineering (buffer size estimation)"
        },
        {
          "field_a_term": "Kingman's (1962) G/G/1 formula",
          "field_b_term": "wait time approximation for general (non-exponential) service and arrival distributions",
          "note": "c_a = c_s = 1 (exponential) recovers M/M/1 exactly; higher variability always increases wait"
        },
        {
          "field_a_term": "M/M/c pooling effect",
          "field_b_term": "economies of scale in service systems: one large server pool outperforms N small ones",
          "note": "Hospital bed pooling: a shared ICU with 20 beds has lower wait probability than two 10-bed units"
        },
        {
          "field_a_term": "heavy-traffic approximation (ρ → 1)",
          "field_b_term": "performance of a service system near saturation",
          "note": "Heavy-traffic limit: W ~ 1/(μ(1-ρ)) — universal scaling independent of distribution details"
        }
      ],
      "references": [
        {
          "note": "Erlang (1917) Elektroteknikeren 13 — telephone exchange queuing, original M/M/c analysis"
        },
        {
          "doi": "10.1287/opre.9.3.383",
          "note": "Little (1961) Oper Res 9:383 — proof of Little's law"
        },
        {
          "note": "Kingman (1962) J R Stat Soc B 24:383 — G/G/1 heavy-traffic approximation"
        },
        {
          "note": "Kleinrock (1975) Queuing Systems — comprehensive textbook treatment"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-engineering/b-queuing-theory-service-systems.yaml"
    },
    {
      "id": "b-robust-control-h-infinity",
      "title": "H∞ optimal control minimises worst-case L²-induced gain ||T_{zw}||∞ ≤ γ via Riccati equations or LMI convex optimisation; equals a minimax Nash game between controller and adversarial disturbance; achieves 10 nm precision in hard-disk heads and flutter suppression in aircraft through structured uncertainty μ-synthesis.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Classical LQR/LQG control minimises expected quadratic cost E[∫(x'Qx + u'Ru)dt] — optimal for Gaussian disturbances, but brittle to model uncertainty or adversarial inputs. H∞ control (Zames 1981) instead minimises the worst-case disturbance-to-output gain: ||T_{zw}||∞ = sup_{ω} σ_max[T_{zw}(jω)], t",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-robust-control-lmi-neural-network-stability-certificates"
      ],
      "communication_gap": "H∞ control theory was developed by control theorists (IEEE Trans Autom Control, Automatica, SIAM J Control Optim) and immediately adopted by aerospace and disk-drive engineers. Game theorists studying Nash equilibria in economics rarely engage with the H∞/DGKF differential game formulation despite formal identity. The LMI convex optimisation connection (Boyd et al. 1994 book) bridged control theory and optimisation, but remains largely unknown outside the control community. Neuroscientists studying motor control rarely read H∞ literature.\n",
      "translation_table": [
        {
          "field_a_term": "H∞ norm ||T_{zw}||∞ (worst-case gain over all frequencies)",
          "field_b_term": "maximum adversarial disturbance amplification — robustness margin"
        },
        {
          "field_a_term": "minimax differential game (controller vs. disturbance)",
          "field_b_term": "Nash equilibrium of zero-sum two-player control game"
        },
        {
          "field_a_term": "Riccati equation solution (DGKF state-space formulas)",
          "field_b_term": "Hamilton-Jacobi-Isaacs PDE solution of the differential game"
        },
        {
          "field_a_term": "LMI (linear matrix inequality) feasibility",
          "field_b_term": "convex optimisation certificate for robust stability"
        },
        {
          "field_a_term": "structured singular value μ (μ-synthesis)",
          "field_b_term": "tightest robustness bound for block-structured model uncertainty"
        },
        {
          "field_a_term": "small-gain theorem ||P||·||C|| < 1",
          "field_b_term": "Banach contraction mapping theorem applied to feedback loop"
        }
      ],
      "references": [
        {
          "doi": "10.1109/TAC.1981.1102603",
          "note": "Zames (1981) IEEE Trans Autom Control 26:301 — feedback and optimal sensitivity"
        },
        {
          "doi": "10.1109/9.29425",
          "note": "Doyle et al. (1989) IEEE Trans Autom Control 34:831 — state-space solutions to H2 and H∞ control"
        },
        {
          "note": "Zhou et al. (1996) Robust and Optimal Control — Prentice Hall"
        },
        {
          "doi": "10.1016/0005-1098(93)90175-S",
          "note": "Packard & Doyle (1993) Automatica 29:71 — complex structured singular value"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-engineering/b-robust-control-h-infinity.yaml"
    },
    {
      "id": "b-robust-statistics-outlier-detection",
      "title": "Robust statistics bridges mathematics and engineering: Huber's M-estimators, the 50% breakdown point of least trimmed squares, and RANSAC (Random Sample Consensus) provide principled methods for fitting models to corrupted data — enabling reliable computer vision, GPS, robotics, and fraud detection.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Classical statistics (OLS, sample mean) is fragile: a single outlier can arbitrarily corrupt the estimate. Robust statistics provides estimators with bounded influence on any data point. Huber (1964) M-estimator: minimize Σρ(rᵢ/σ) where the loss function ρ(x) = x²/2 for |x| < k (quadratic nea",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-robust-statistics-deep-learning-improves-noisy-label-training"
      ],
      "communication_gap": "Mathematical statisticians who develop robust estimators (breakdown-point theory, asymptotics) rarely engage with computer vision engineers who use RANSAC daily. The deep learning community largely rediscovers robust loss functions (Huber loss is widely used in object detection regression heads) without engaging with the formal robust statistics theory. Practitioners in fraud detection and anomaly detection rarely cite the mathematical literature on breakdown points.\n",
      "translation_table": [
        {
          "field_a_term": "Huber loss ρ(x) (quadratic + linear tails)",
          "field_b_term": "robust regression objective; M-estimator with tuning constant k",
          "note": "k = 1.345 gives 95% asymptotic efficiency vs. OLS at Gaussian; adjustable"
        },
        {
          "field_a_term": "breakdown point ε* = 50%",
          "field_b_term": "half the data can be replaced by arbitrary values without corrupting the estimate",
          "note": "LTS and repeated median achieve 50%; impossible to exceed 50% in symmetric estimation"
        },
        {
          "field_a_term": "influence function IF(x; T, F)",
          "field_b_term": "first-order sensitivity of estimator T to a point mass at x",
          "note": "bounded IF → robust; IF of sample mean is unbounded (linear in x)"
        },
        {
          "field_a_term": "RANSAC (Random Sample Consensus)",
          "field_b_term": "model fitting algorithm tolerating up to 50% outliers via random hypothesis testing",
          "note": "RANSAC does not require the model, data distribution, or outlier fraction to be known a priori"
        },
        {
          "field_a_term": "MM-estimator (S-step + M-step)",
          "field_b_term": "robust regression combining 50% breakdown with 95% Gaussian efficiency",
          "note": "gold standard for robust regression; Yohai (1987); implemented in R's lmrob"
        }
      ],
      "references": [
        {
          "doi": "10.1214/aoms/1177703732",
          "note": "Huber (1964) Robust estimation of a location parameter; Ann Math Stat 35:73"
        },
        {
          "note": "Hampel et al. (1986) Robust Statistics — The Approach Based on Influence Functions; Wiley"
        },
        {
          "doi": "10.1145/358669.358692",
          "note": "Fischler & Bolles (1981) Random sample consensus — a paradigm for model fitting with applications to image analysis; Commun ACM 24:381"
        },
        {
          "note": "Rousseeuw & Leroy (1987) Robust Regression and Outlier Detection; Wiley"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-engineering/b-robust-statistics-outlier-detection.yaml"
    },
    {
      "id": "b-wavelet-theory-signal-compression",
      "title": "Mallat's multiresolution analysis and Daubechies compactly-supported wavelets provide an O(N) fast wavelet transform achieving near-optimal signal compression, with JPEG-2000 using 9/7 biorthogonal wavelets for 40:1 compression and Donoho-Johnstone wavelet shrinkage achieving minimax-optimal denoising over Sobolev function classes.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Wavelets provide a multi-resolution analysis (MRA) of signals: a nested sequence of approximation spaces V_j ⊂ V_{j+1} ⊂ L²(ℝ) with scaling function φ and wavelet ψ satisfying ⟨ψ(·-k), ψ(·-l)⟩ = δ_{kl}, ⟨φ(·-k), φ(·-l)⟩ = δ_{kl}. Mallat's fast wavelet transform (1989): decompose f ∈ V_{j+1} as f = A",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-wavelet-shrinkage-minimax-optimal-natural-image-sparsity"
      ],
      "communication_gap": "Wavelet theory was developed primarily by applied mathematicians (Daubechies, Mallat, Meyer — Mallat bridges signal processing and mathematics, Meyer won the 2017 Abel Prize). The electrical engineering signal processing community adopted wavelets for image compression (JPEG-2000) but often treats them as black-box filter banks without understanding the underlying MRA theory. The statistics community (Donoho) developed wavelet denoising theory independently. The neuroscience connection (V1 as Gabor wavelet bank) is well-known to computational neuroscientists but unfamiliar to both engineers and mathematicians.\n",
      "translation_table": [
        {
          "field_a_term": "scaling function φ and wavelet ψ in MRA",
          "field_b_term": "low-frequency (approximation) and high-frequency (edge/detail) image components",
          "note": "Wavelets are matched to human visual system: HVS most sensitive to contrast edges (ψ captures)"
        },
        {
          "field_a_term": "N vanishing moments of Daubechies wavelet",
          "field_b_term": "compression efficiency for smooth image regions",
          "note": "More vanishing moments = smoother wavelet = better compression for polynomial regions"
        },
        {
          "field_a_term": "soft thresholding at λ = σ√(2 log N)",
          "field_b_term": "denoising by coefficient sparsification (signal is sparse in wavelet domain)",
          "note": "The threshold λ is optimal (Stein's unbiased risk estimate) — a mathematical prediction validated empirically"
        },
        {
          "field_a_term": "Sobolev smoothness exponent s of function class",
          "field_b_term": "image texture roughness / compression difficulty",
          "note": "s determines optimal compression rate; s=1 is smooth images, s→0 is textures/fractals"
        },
        {
          "field_a_term": "discrete wavelet packet decomposition (full binary tree)",
          "field_b_term": "adaptive subband coding for audio compression (WMA, ATRAC)",
          "note": "Best basis selection (Coifman-Wickerhauser) chooses the tree decomposition minimising entropy"
        }
      ],
      "references": [
        {
          "doi": "10.1109/34.192463",
          "note": "Mallat (1989) A theory for multiresolution signal decomposition: the wavelet representation. IEEE Trans Pattern Anal Mach Intell 11:674"
        },
        {
          "doi": "10.1002/cpa.3160410705",
          "note": "Daubechies (1988) Orthonormal bases of compactly supported wavelets. Commun Pure Appl Math 41:909"
        },
        {
          "doi": "10.1093/biomet/81.3.425",
          "note": "Donoho & Johnstone (1994) Ideal spatial adaptation by wavelet shrinkage. Biometrika 81:425"
        },
        {
          "doi": "10.1109/83.136598",
          "note": "Antonini et al. (1992) Image coding using wavelet transform. IEEE Trans Image Process 1:205"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-engineering/b-wavelet-theory-signal-compression.yaml"
    },
    {
      "id": "b-game-theory-evolution",
      "title": "Nash equilibrium ↔ evolutionary stable strategy: game theory and natural selection are the same optimisation",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Maynard Smith & Price (1973) showed that natural selection on heritable strategies converges to evolutionary stable strategies (ESS), which are exactly Nash equilibria of the payoff game defined by fitness interactions. The replicator equation dx_i/dt = x_i * (f_i(x) - mean_f(x)) — derived from popu",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-gan-training-redqueen-dynamics"
      ],
      "communication_gap": "Nash published his equilibrium theorem in 1950 in mathematics journals. Maynard Smith independently derived ESS in 1973 in evolutionary biology journals. The mathematical equivalence was not widely recognised until Hofbauer & Sigmund (1988) unified both in \"The Theory of Evolution and Dynamical Systems.\" The ML community rediscovered the same framework independently through GANs (2014) and RLHF (2017-2022) without systematically connecting to the 70-year-old evolutionary biology literature.\n",
      "translation_table": [
        {
          "field_a_term": "Nash equilibrium (game theory)",
          "field_b_term": "evolutionary stable strategy ESS (evolutionary biology)",
          "note": "Every ESS is a Nash equilibrium; not all Nash equilibria are ESS"
        },
        {
          "field_a_term": "payoff matrix A_ij",
          "field_b_term": "fitness payoff from strategy i against j",
          "note": "Matrix entries are fitness increments from pairwise interactions"
        },
        {
          "field_a_term": "rational agent best-response",
          "field_b_term": "natural selection (no rationality needed)",
          "note": "Equilibrium reached by selection, not reasoning"
        },
        {
          "field_a_term": "replicator equation (math/game theory)",
          "field_b_term": "selection-mutation dynamics (population genetics)",
          "note": "Identical ODE — different interpretations"
        },
        {
          "field_a_term": "zero-sum two-player game minimax",
          "field_b_term": "arms race / antagonistic coevolution",
          "note": "Red Queen dynamics as minimax optimisation"
        },
        {
          "field_a_term": "GAN training (ML)",
          "field_b_term": "host-parasite coevolution",
          "note": "Both are two-player zero-sum replicator dynamics"
        },
        {
          "field_a_term": "RLHF / Nash fine-tuning (ML)",
          "field_b_term": "frequency-dependent selection",
          "note": "LLM alignment uses Nash equilibrium reward models"
        }
      ],
      "references": [
        {
          "doi": "10.1038/246015a0",
          "note": "Maynard Smith & Price (1973) Nature — The logic of animal conflict; ESS definition"
        },
        {
          "doi": "10.1073/pnas.67.3.1092",
          "note": "Price (1970) — The Price equation; unification of selection theorems"
        },
        {
          "arxiv": "1406.2661",
          "note": "Goodfellow et al. (2014) — GANs as two-player Nash game"
        },
        {
          "doi": "10.1016/0025-5564(78)90077-9",
          "note": "Taylor & Jonker (1978) — replicator equation derivation"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/mathematics-evolution/b-game-theory-evolution.yaml"
    },
    {
      "id": "b-kin-selection-price-equation",
      "title": "Kin selection and Hamilton's rule (rB > C) are derived as a special case of the Price equation w̄Δz̄ = Cov(w,z) + E[w*Δz]: the genetic relatedness r is the regression coefficient b(z_j, z_i) of partner phenotype on focal individual's genotype, benefit B equals the selection gradient on partner phenotype, and the Price equation partitions total selection into direct and indirect (inclusive fitness) components",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Price equation Δz̄ = Cov(w,z)/w̄ + E[w*Δz]/w̄ provides the mathematical foundation for kin selection: Hamilton's rule rB > C emerges when we partition total fitness w_i = (1-c)*z_i + b*z̄_relatives and apply the Price equation; the relatedness coefficient r is mathematically defined as the regress",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Evolutionary biologists debate inclusive fitness vs multilevel selection empirically and conceptually while mathematicians use the Price equation as a general change theorem; the equivalence between Hamilton's kin selection and multilevel selection (proven algebraically through the Price equation) remains controversial in evolutionary biology despite being mathematically straightforward.",
      "translation_table": [
        {
          "field_a_term": "genetic relatedness coefficient r (evolutionary biology)",
          "field_b_term": "regression coefficient of social partner genotype on focal genotype (mathematics)",
          "note": "r = Cov(g_partner, g_focal)/Var(g_focal); equals 1/2 for full siblings, 1/4 for half-siblings in Mendelian inheritance"
        },
        {
          "field_a_term": "Hamilton's rule rB > C (evolutionary biology)",
          "field_b_term": "condition for positive covariance term in Price equation partition (mathematics)",
          "note": "Altruism spreads when Cov(w,z) > 0 in the Price equation; rB > C is the linearized condition for this"
        },
        {
          "field_a_term": "inclusive fitness (evolutionary biology)",
          "field_b_term": "total selection gradient summed over direct and indirect (kin) fitness components (mathematics)",
          "note": "Inclusive fitness = direct fitness + Σ r_j * indirect effect on j; Price equation makes this partition rigorous"
        },
        {
          "field_a_term": "multilevel selection (evolutionary biology)",
          "field_b_term": "hierarchical partitioning of the Price equation across levels of organization (mathematics)",
          "note": "Multilevel Price: G = Cov(W_g, Z_g) + E[Cov_i(w_i, z_i)] where g = groups, i = individuals within groups"
        }
      ],
      "references": [
        {
          "doi": "10.1093/jhered/84.5.372",
          "note": "Price (1972) - extension of covariance selection mathematics: multilevel extension of the Price equation"
        },
        {
          "doi": "10.1016/S0022-5193(64)80040-3",
          "note": "Hamilton (1964) - genetical evolution of social behaviour I: original inclusive fitness"
        },
        {
          "doi": "10.1111/j.1420-9101.2009.01828.x",
          "note": "Gardner et al. (2011) - meaning and misunderstanding of inclusive fitness: Price equation clarification"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-evolution/b-kin-selection-price-equation.yaml"
    },
    {
      "id": "b-ricci-curvature-x-price-equation-covariance-analogy",
      "title": "Ricci curvature from Riemannian geometry characterizes how volumes of small geodesic balls initially shrink or expand compared with Euclidean expectations — distinct but loosely evocative of the covariance structure in quantitative genetics captured by the Price equation Δz̄ = Cov(w,z)/w̄ + E[wΔz]/w̄, where selection responds to trait–fitness covariance rather than to traits alone.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "This bridge is **explicitly speculative**: Ricci curvature measures second-order metric distortion along manifold directions, whereas Price's covariance term Cov(w,z) measures linear coupling between fitness and trait inheritance — different mathematical objects acting at different tensor ranks and ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-fisher-ricci-price-covariance-analogy-calibration"
      ],
      "communication_gap": "Information geometry (Fisher metric curvature on statistical manifolds) is closer to genetics formally than Ricci curvature of physical configuration spaces, yet Ricci appears frequently in physics outreach analogies and Price covariance appears in evolutionary theory — leading casual readers to over-identify them without consulting Fisher–Rao geometry literature.\n",
      "translation_table": [
        {
          "field_a_term": "Ricci curvature Ric(v,v) on unit vectors",
          "field_b_term": "directional sensitivity of mean trait change to trait–fitness alignment (covariance term)",
          "note": "Speculative metaphor — curvature ≠ covariance algebraically; contrast via labeled analogy only."
        },
        {
          "field_a_term": "volume comparison / Bishop–Gromov monotonicity intuition",
          "field_b_term": "proportional changes in trait distributions under selection versus transmission bias terms",
          "note": "Loose parallelism — both regulate infinitesimal comparative volumes/changes; no quantitative isomorphism asserted."
        },
        {
          "field_a_term": "Laplacian / diffusion on manifolds linked to Ricci via Bochner formulas",
          "field_b_term": "diffusion terms in quantitative genetics / diffusion approximation for allele frequencies",
          "note": "Potential modeling pathway through diffusion generators on trait spaces — remains modeling speculation pending formalization."
        }
      ],
      "references": [
        {
          "doi": "10.1111/j.1420-9101.2012.02498.x",
          "note": "Frank (2012) — Natural selection IV; Price equation fundamentals (J Evol Biol)."
        },
        {
          "doi": "10.1038/227520a0",
          "note": "Price (1970) — Selection and covariance (Nature)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-evolution/b-ricci-curvature-x-price-equation-covariance-analogy.yaml"
    },
    {
      "id": "b-black-scholes-heat-equation",
      "title": "The Black-Scholes option pricing PDE is the heat equation in disguise: the change of variables C(S,t) → u(x,τ) via x=ln(S/K) transforms it into ∂u/∂τ = σ²/2 · ∂²u/∂x²",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Black-Scholes PDE for a European call option price C(S,t): ∂C/∂t + (1/2)σ²S²·∂²C/∂S² + rS·∂C/∂S - rC = 0 becomes the standard heat (diffusion) equation after the substitution x=ln(S/K), τ=T-t, C=e^(αx+βτ)u(x,τ) with appropriate α,β: ∂u/∂τ = (σ²/2)·∂²u/∂x² The terminal condition C(S,T)=max(S-K,0)",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-black-scholes-heat-equation"
      ],
      "communication_gap": "The mathematical equivalence between Black-Scholes and the heat equation is presented in advanced mathematical finance textbooks but is rarely taught in physics curricula, even though physicists invented stochastic calculus (Bachelier preceded Einstein in applying Brownian motion to markets in 1900). Finance practitioners who derive the Black-Scholes formula algebraically often do not exploit the full PDE machinery available from mathematical physics.\n",
      "translation_table": [
        {
          "field_a_term": "stock price S(t) under risk-neutral measure",
          "field_b_term": "position of a diffusing particle x(t) = ln(S/S₀)",
          "note": "Itô's lemma gives dx = (r-σ²/2)dt + σdW — arithmetic Brownian motion in log price"
        },
        {
          "field_a_term": "volatility σ (annualized standard deviation of log returns)",
          "field_b_term": "diffusion coefficient D = σ²/2 in the heat equation",
          "note": "Higher volatility = faster diffusion = wider option price distributions"
        },
        {
          "field_a_term": "option time value theta = -∂C/∂t",
          "field_b_term": "temporal heat flux — rate of energy dissipation in the diffusion analogy",
          "note": "Theta is always negative for long options (time decay) = heat always flows forward"
        },
        {
          "field_a_term": "option delta Δ = ∂C/∂S",
          "field_b_term": "spatial gradient of the temperature field (first spatial derivative)",
          "note": "Delta hedging = maintaining zero gradient at the replication portfolio level"
        }
      ],
      "references": [
        {
          "doi": "10.1086/260062",
          "note": "Black & Scholes (1973) The pricing of options and corporate liabilities. J Polit Econ 81:637"
        },
        {
          "doi": "10.2307/3003143",
          "note": "Merton (1973) Theory of rational option pricing. Bell J Econ Manag Sci 4:141"
        },
        {
          "doi": "10.1080/13504860500117503",
          "note": "Wilmott, Howison & Dewynne (1995) The Mathematics of Financial Derivatives. Cambridge UP"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-finance/b-black-scholes-heat-equation.yaml"
    },
    {
      "id": "b-random-matrix-portfolio-optimization",
      "title": "Random matrix theory (Marchenko-Pastur law) identifies which eigenvalues of a financial covariance matrix carry genuine correlation signal versus statistical noise, providing an objective criterion for cleaning the matrix and dramatically improving Markowitz mean-variance portfolio optimization out-of-sample.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The sample covariance matrix of N financial return series of length T has most eigenvalues distributed according to the Marchenko-Pastur law — the asymptotic distribution of eigenvalues of a random Wishart matrix when N/T = q is fixed. The bulk of eigenvalues lies within [λ_min, λ_max] = (1 ± √q)²/T",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-eigenvalue-cleaning-portfolio-performance"
      ],
      "communication_gap": "RMT was developed in nuclear physics (Wigner, Dyson) for modeling heavy nucleus energy levels and imported to finance by physicists (Laloux, Potters, Bouchaud at the Santa Fe Institute and École Normale). Financial economists were initially resistant to physics imports but Ledoit-Wolf shrinkage — an independently derived statistical approach that converges to RMT cleaning in the large-N limit — is now standard. The physics derivation is cleaner and more principled but less known among practitioners trained in econometrics.\n",
      "translation_table": [
        {
          "field_a_term": "Wishart random matrix eigenvalue distribution",
          "field_b_term": "null distribution of financial correlation matrix eigenvalues",
          "note": "Marchenko-Pastur law gives exact analytic form"
        },
        {
          "field_a_term": "eigenvalues outside Marchenko-Pastur bulk",
          "field_b_term": "genuine market factors (market mode, sector modes)",
          "note": "largest eigenvalue ≈ market beta; next few ≈ sector correlations"
        },
        {
          "field_a_term": "eigenvalue cleaning (replace bulk with MP average)",
          "field_b_term": "regularized covariance matrix for portfolio optimization",
          "note": "reduces estimation error; equivalent to Ledoit-Wolf shrinkage in limit"
        },
        {
          "field_a_term": "ratio q = N/T (matrix aspect ratio)",
          "field_b_term": "curse of dimensionality parameter in finance (assets / observations)",
          "note": "larger q = more noise; modern portfolios have q close to or above 1"
        },
        {
          "field_a_term": "Tracy-Widom distribution (largest eigenvalue fluctuations)",
          "field_b_term": "statistical significance threshold for market-wide shocks"
        },
        {
          "field_a_term": "free probability theory (addition/multiplication of random matrices)",
          "field_b_term": "composition of risk factors in multi-asset portfolios"
        }
      ],
      "references": [
        {
          "note": "Marchenko & Pastur (1967) — Distribution of eigenvalues for some sets of random matrices; Mat Sb 72:507"
        },
        {
          "doi": "10.1103/PhysRevLett.83.1467",
          "note": "Laloux et al. (1999) — Noise dressing of financial correlation matrices; Phys Rev Lett 83:1467"
        },
        {
          "doi": "10.1103/PhysRevLett.83.1471",
          "note": "Plerou et al. (1999) — Universal and nonuniversal properties of cross correlations in financial data; Phys Rev Lett 83:1471"
        },
        {
          "doi": "10.1016/S0047-259X(03)00096-4",
          "note": "Ledoit & Wolf (2004) — A well-conditioned estimator for large-dimensional covariance matrices; J Multivar Anal 88:365"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-finance/b-random-matrix-portfolio-optimization.yaml"
    },
    {
      "id": "b-stochastic-calculus-black-scholes",
      "title": "Itô stochastic calculus ↔ Black-Scholes option pricing — the heat equation in disguise",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Itô calculus (1944) defines stochastic differential equations driven by Brownian motion dW, where the non-anticipating Itô integral and Itô's lemma — the stochastic chain rule — replace ordinary calculus. Applied to a self-financing hedged portfolio (Black & Scholes 1973, Merton 1973), Itô's lemma y",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-stochastic-volatility-as-turbulent-diffusion"
      ],
      "communication_gap": "The Black-Scholes paper (1973) was initially rejected by multiple journals because economists were unfamiliar with stochastic calculus. The heat equation connection was known to mathematical physicists but finance departments rarely included physicists. The field of quantitative finance emerged largely through practitioners (Fischer Black had a physics background) rather than academic cross-pollination. Today the connection is taught in mathematical finance programs but rarely in physics curricula — the bridge is one-directional.\n",
      "translation_table": [
        {
          "field_a_term": "Brownian motion W(t)",
          "field_b_term": "log-price random walk ln(S(t)/S₀)",
          "note": "Both are Wiener processes; volatility σ scales the diffusion coefficient"
        },
        {
          "field_a_term": "Itô's lemma (stochastic chain rule)",
          "field_b_term": "delta-hedging argument (no-arbitrage portfolio)",
          "note": "Itô's lemma applied to V(S,t) generates the PDE; hedging eliminates the dW term"
        },
        {
          "field_a_term": "heat equation ∂u/∂τ = D ∂²u/∂x²",
          "field_b_term": "Black-Scholes PDE (after log-price substitution)",
          "note": "D = ½σ²; the two equations are algebraically identical under variable substitution"
        },
        {
          "field_a_term": "heat kernel / Gaussian propagator G(x,τ)",
          "field_b_term": "Black-Scholes log-normal distribution of future prices",
          "note": "The fundamental solution of the heat equation gives the risk-neutral price distribution"
        },
        {
          "field_a_term": "initial temperature distribution (boundary condition)",
          "field_b_term": "option payoff function max(S−K, 0) at expiry",
          "note": "Both specify the boundary condition that the PDE is solved backward from"
        },
        {
          "field_a_term": "thermal diffusivity D",
          "field_b_term": "implied volatility σ",
          "note": "Calibrating σ from market prices ≡ measuring thermal diffusivity from temperature data"
        }
      ],
      "references": [
        {
          "note": "Itô (1944) — stochastic integral and Itô's lemma",
          "url": "https://doi.org/10.52547/ijmsi.15.2.111"
        },
        {
          "doi": "10.1086/260062",
          "note": "Black & Scholes (1973) J Polit Econ 81:637 — the original option pricing formula"
        },
        {
          "doi": "10.2307/3003143",
          "note": "Merton (1973) Bell J Econ 4:141 — continuous-time finance and the PDE derivation"
        },
        {
          "note": "Shreve (2004) Stochastic Calculus for Finance II — graduate textbook connecting Itô calculus to Black-Scholes",
          "url": "https://link.springer.com/book/9780387401010"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-finance/b-stochastic-calculus-black-scholes.yaml"
    },
    {
      "id": "b-zipf-law-information-efficiency",
      "title": "Zipf's law (word frequency f_r ∝ r^{-α}, α ≈ 1) emerges from entropy maximisation in communication systems — it is the signature of a channel operating at maximum communicative efficiency minimising joint speaker-listener effort, and the same power law appears in city sizes, income distributions, citation counts, and any rank-frequency distribution generated by an entropy-maximising process under a frequency constraint.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Zipf (1935, 1949) documented that in any natural language corpus the r-th most frequent word has frequency f_r ≈ C / r (Zipf's law, exponent α = 1 exactly). He proposed a \"principle of least effort\": speakers prefer short, common words (minimise production effort); listeners prefer long, specific wo",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-zipf-critical-point-communication-efficiency"
      ],
      "communication_gap": "Zipf's law is discussed in linguistics, economics, geography, and physics journals with almost no cross-citation. Linguists emphasise cognitive explanations (memory, word production); physicists emphasise statistical mechanics analogies; economists emphasise power-law firm/city size distributions. Mandelbrot's 1953 paper (written in French, published in a mathematics communication journal) is rarely cited in linguistics; Zipf's original books are rarely cited in physics. Piantadosi (2014) in a psychology journal is the clearest synthesis but reaches neither community consistently.\n",
      "translation_table": [
        {
          "field_a_term": "Boltzmann distribution (physics, entropy max at fixed energy)",
          "field_b_term": "Zipf distribution (linguistics, entropy max at fixed mean cost)"
        },
        {
          "field_a_term": "energy E of microstate (physics)",
          "field_b_term": "cost c(r) of using rank-r word (information theory)"
        },
        {
          "field_a_term": "temperature T (physics)",
          "field_b_term": "trade-off parameter β between speaker and listener effort"
        },
        {
          "field_a_term": "partition function Z = Σ exp(-E_i/kT)",
          "field_b_term": "Zipf normalisation constant C = Σ r^{-α}  (Riemann zeta / generalized harmonic number; Hurwitz zeta for the shifted Zipf–Mandelbrot form (r+q)^{-α})"
        },
        {
          "field_a_term": "maximum entropy phase transition (critical temperature)",
          "field_b_term": "Zipf exponent α = 1 (critical point of speaker-listener game)"
        },
        {
          "field_a_term": "Huffman code (optimal prefix-free coding)",
          "field_b_term": "natural-language lexicon length distribution (log-normal fit)"
        }
      ],
      "references": [
        {
          "url": "https://www.hup.harvard.edu/catalog.php?isbn=9780674445505",
          "note": "Zipf (1949) Human Behavior and the Principle of Least Effort — Addison-Wesley, foundational empirical work"
        },
        {
          "doi": "10.3758/s13423-014-0585-6",
          "note": "Piantadosi (2014) Zipf's word frequency law in natural language — a critical review, Psychon Bull Rev"
        },
        {
          "doi": "10.1073/pnas.0335980100",
          "note": "Ferrer i Cancho & Solé (2003) Least effort and the origins of scaling in human language, PNAS 100:788"
        },
        {
          "doi": "10.1038/238413a0",
          "note": "Mandelbrot (1953) 'An informational theory of the statistical structure of language', in Communication Theory (ed. W. Jackson, Butterworths) — information-theoretic derivation of Zipf's law; the book chapter has no DOI, so this DOI resolves to May (1972) Nature, the secondary source cited here"
        },
        {
          "doi": "10.1103/PhysRev.106.620",
          "note": "Jaynes (1957) Information theory and statistical mechanics — entropy maximisation formalism"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/mathematics-linguistics/b-zipf-law-information-efficiency.yaml"
    },
    {
      "id": "b-cut-cell-finite-volume-x-voxel-medical-segmentation",
      "title": "Cartesian cut-cell and embedded-boundary finite-volume methods conservatively integrate hyperbolic conservation laws on grids that intersect curved interfaces — conceptually adjacent to voxelized medical image segmentation where partial-volume effects allocate tissue fractions across cubic cells, though clinical pipelines emphasize learned classifiers rather than explicit finite-volume flux bookkeeping.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Finite-volume schemes maintain discrete conservation ∑ F·n Δt across faces; cut-cell methods redistribute fluxes when an embedded boundary slices Cartesian cells. Voxel segmentation assigns partial tissue labels per voxel analogous to volume fractions — **speculative isomorphism**: both manipulate c",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-cut-cell-conservative-flux-reduces-leakage-medical-seg"
      ],
      "communication_gap": "CFD workshops publish cut-cell stability proofs while MICCAI emphasizes benchmark Dice scores — partial-volume physics appears in MRI physics circles but rarely cites finite-volume literature vocabulary.\n",
      "translation_table": [
        {
          "field_a_term": "cut-cell volume fraction α in intersected Cartesian cell",
          "field_b_term": "partial-volume fraction of tissue class i in MRI voxel",
          "note": "Shared geometric fraction idea — different validation targets (flux stability vs Dice score)."
        },
        {
          "field_a_term": "ghost-fluid / face flux redistribution near boundaries",
          "field_b_term": "boundary-aware convolution / surface-aware loss weighting in segmentation networks",
          "note": "Algorithmic analogy — not identical mathematics."
        },
        {
          "field_a_term": "small-cell stability fixes (merging, redistribution)",
          "field_b_term": "morphological post-processing / topology constraints on segmentations",
          "note": "Both tackle irregular small features near interfaces."
        }
      ],
      "references": [
        {
          "doi": "10.1016/0021-9991(89)90035-1",
          "note": "Berger & Colella (1989) — local adaptive mesh refinement for shock hydrodynamics (embedded-boundary / refinement lineage informing cut-cell methods)."
        },
        {
          "arxiv": "1505.04597",
          "note": "Ronneberger et al. (2015) — U-Net convolutional networks for biomedical image segmentation (MICCAI; arXiv preprint widely cited)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-medicine/b-cut-cell-finite-volume-x-voxel-medical-segmentation.yaml"
    },
    {
      "id": "b-first-passage-time-x-clinical-deterioration-early-warning",
      "title": "First-passage-time theory bridges stochastic threshold crossing and clinical deterioration warning models.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Patient deterioration alerts can be posed as first-passage events of latent physiological processes crossing risk boundaries, importing hazard calibration methods from stochastic process theory.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-first-passage-hitting-time-models-extend-clinical-warning-lead-time"
      ],
      "communication_gap": "Communities use different terminology and validation conventions, masking transferable method equivalence.",
      "translation_table": [],
      "references": [
        {
          "url": "https://www.nature.com/articles/s41591-018-0263-8",
          "note": "Clinical early warning benchmark context."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/mathematics-medicine/b-first-passage-time-x-clinical-deterioration-early-warning.yaml"
    },
    {
      "id": "b-fisher-information-cramer-rao-x-dose-spacing-experimental-design",
      "title": "Fisher information and the Cramer-Rao bound translate dose-spacing choices in medical experiments into parameter-precision limits: sampling doses where response curves are most informative can reduce uncertainty without increasing participant burden.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The bridge connects statistical information geometry to practical dose-ranging design. It supports simulation and design diagnostics, not automatic claims about clinical benefit or ethical acceptability.\n",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-fisher-optimal-dose-grid-reduces-parameter-variance-simulation"
      ],
      "communication_gap": "Biostatisticians use design criteria, while clinicians reason in familiar dose levels and safety margins; the precision bound is rarely visualized.\n",
      "translation_table": [
        {
          "field_a_term": "Fisher information matrix",
          "field_b_term": "expected precision for dose-response parameters",
          "note": "Information is model-dependent."
        },
        {
          "field_a_term": "Cramer-Rao lower bound",
          "field_b_term": "minimum achievable estimator variance under assumptions",
          "note": "Violations flag model risk."
        },
        {
          "field_a_term": "optimal design criterion",
          "field_b_term": "candidate dose grid and allocation rule",
          "note": "Ethics constraints remain binding."
        }
      ],
      "references": [
        {
          "doi": "10.1098/rsta.1922.0009",
          "note": "Fisher (1922) foundational likelihood/statistical estimation paper."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-medicine/b-fisher-information-cramer-rao-x-dose-spacing-experimental-design.yaml"
    },
    {
      "id": "b-hopf-bifurcation-x-cardiac-alternans",
      "title": "Period-doubling alternans in cardiac tissue — beat-to-beat alternation of action potential duration or calcium transient amplitude — arises through nonlinear ionic dynamics that can be organized by Hopf and homoclinic bifurcations in spatially extended models, linking bifurcation theory to clinically measured electrical instability precursors.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "In reduced ion-channel models, alternans appears when gain and refractoriness produce subharmonic or quasi-periodic dynamics consistent with crossing bifurcations of periodic orbits (often analyzed via semi-local stability of spiral waves and discordant alternans). Restitution curves map pacing para",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-bifurcation-continuation-predicts-alternans-onset-optical-mapping"
      ],
      "communication_gap": "Clinical electrophysiology curricula emphasize restitution and spatial dispersion without routinely naming bifurcation-normal-form language; nonlinear dynamics textbooks illustrate Hopf bifurcations using engineered systems rather than paced cardiac cables — leaving quantitative translation tables instructor-dependent.\n",
      "translation_table": [
        {
          "field_a_term": "Hopf bifurcation / loss of stability of a periodic orbit",
          "field_b_term": "onset of APD or calcium alternans at critical pacing cycle length",
          "note": "Qualitative analogy — cardiac tissue uses pacing maps and spatial coupling; Hopf structure appears in partial reductions but not universally across full ionic models."
        },
        {
          "field_a_term": "normal-form amplitude equation near bifurcation",
          "field_b_term": "steepness of restitution / calcium cycling gain near alternans threshold",
          "note": "Gain increases correspond to approaching bifurcation surfaces in parameter space."
        },
        {
          "field_a_term": "period-doubling cascade (smooth maps / flows)",
          "field_b_term": "progression from concordant to discordant alternans and fibrillation risk",
          "note": "Clinical trajectory is multifactorial; dynamical analogy motivates monitoring thresholds not mechanical identity."
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.83.3844",
          "note": "Qu et al. (1999) — nonlinear dynamics of cardiac alternans in paced cable models (PRL)"
        },
        {
          "doi": "10.1103/PhysRevE.76.021917",
          "note": "Echebarria & Karma (2007) — instability and bifurcation structure linked to alternans in ionic models"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-medicine/b-hopf-bifurcation-x-cardiac-alternans.yaml"
    },
    {
      "id": "b-persistent-homology-rr-intervals-x-arrhythmia-early-warning",
      "title": "Persistent homology of RR-interval dynamics provides topology-based early warning for arrhythmia transitions.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Topological summaries of sliding-window cardiac time-series can capture state-transition structure missed by threshold statistics. This extends established TDA disease-subtyping ideas into real-time rhythm surveillance.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-persistent-h1-rise-precedes-afib-onset"
      ],
      "communication_gap": "Adjacent communities use different software stacks and validation norms, so mathematically equivalent tools are often rediscovered in parallel.",
      "translation_table": [
        {
          "field_a_term": "Persistence diagram",
          "field_b_term": "Rhythm-state topology summary",
          "note": "Separates robust dynamical features from noise."
        },
        {
          "field_a_term": "Betti-1 persistence",
          "field_b_term": "Cycle complexity in beat-interval dynamics",
          "note": "Higher persistence can indicate unstable pre-arrhythmic regimes."
        },
        {
          "field_a_term": "Topological distance",
          "field_b_term": "Patient-state divergence metric",
          "note": "Supports individualized alarm calibration."
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.1102826108",
          "note": "Nicolau et al. (2011) topology-based biomedical subgroup discovery."
        },
        {
          "doi": "10.1090/S0273-0979-09-01249-X",
          "note": "Carlsson (2009) topology and data overview, Bull AMS 46:255."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/mathematics-medicine/b-persistent-homology-rr-intervals-x-arrhythmia-early-warning.yaml"
    },
    {
      "id": "b-spectral-clustering-x-metabolite-similarity-network-modules",
      "title": "Spectral clustering on similarity graphs bridges spectral graph theory with metabolomics workflows that infer biochemical modules from covariance or correlation networks.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Established ML workflow uses Laplacian eigenvectors to partition similarity graphs; speculative analogy for metabolomics—batch effects and compositionality can distort similarity geometry so spectral partitions require explicit negative controls and stability screening.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-graph-laplacian-regularization-improves-module-replicability"
      ],
      "communication_gap": "Spectral methods are taught as elegant linear algebra while metabolomics practice centers instrument QC pipelines that alter inferred graphs nonlinearly.",
      "translation_table": [
        {
          "field_a_term": "graph Laplacian spectrum",
          "field_b_term": "module eigen-separation in correlation graphs",
          "note": "Eigen-gap heuristics assume near-block structure rarely guaranteed in omics."
        },
        {
          "field_a_term": "normalized Laplacian variants",
          "field_b_term": "degree-normalized metabolite coupling",
          "note": "Normalization choices interact with measurement scales."
        },
        {
          "field_a_term": "clustering instability under perturbations",
          "field_b_term": "batch-effect sensitivity of modules",
          "note": "Stability analysis becomes essential scientific QC."
        }
      ],
      "references": [
        {
          "arxiv": "0711.0189",
          "note": "Spectral clustering tutorial reference for graph Laplacian methodology."
        }
      ],
      "last_reviewed": "2026-05-09",
      "file": "cross-domain/mathematics-medicine/b-spectral-clustering-x-metabolite-similarity-network-modules.yaml"
    },
    {
      "id": "b-topology-disease-progression",
      "title": "Topological Data Analysis (persistent homology, Betti numbers, the Mapper algorithm) classifies the shape of high-dimensional patient data spaces and reveals disease progression trajectories and subtypes that are invisible to distance-based clustering — because the relevant structure is topological (connected components, loops, voids) rather than metric.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Nicolau et al. (2011) applied the Mapper algorithm (Singh, Mémoli & Carlsson 2007) — which builds a topological skeleton of a point cloud in high-dimensional space — to a breast cancer microarray dataset of 295 patients. Mapper revealed a small \"flare\" subgroup of estrogen-receptor-positive patients",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-tda-cancer-subtype-prognosis-superiority"
      ],
      "communication_gap": "Topological data analysis is primarily published in mathematics and computational geometry venues (Annals of Mathematics, SIAM Journal on Computing, Foundations of Computational Mathematics). Clinical medicine researchers are generally unaware of TDA tools beyond PCA and clustering. The Nicolau et al. (2011) PNAS paper was widely cited in TDA and bioinformatics but has not led to clinical adoption, partly because TDA requires mathematical training not common in medical research, and partly because software implementations (Ripser, Gudhi, KeplerMapper) have not been packaged with clinical-grade documentation and validation. The result is that a powerful set of shape-analysis tools — provably superior to clustering for certain data topologies — remains unused in clinical data analysis.\n",
      "translation_table": [
        {
          "field_a_term": "Betti number β₀ (count of connected components)",
          "field_b_term": "Number of disconnected disease subtypes in patient population",
          "note": "High β₀ means patients cluster into separate disease states with no continuous transitions"
        },
        {
          "field_a_term": "Betti number β₁ (count of independent loops / 1-cycles)",
          "field_b_term": "Cyclic disease progression routes (recurrence, reversion to earlier state)",
          "note": "β₁ > 0 indicates that some patients return to earlier disease states — cyclic trajectories"
        },
        {
          "field_a_term": "Persistence diagram (birth-death pairs for each topological feature)",
          "field_b_term": "Transient vs. stable disease states along progression trajectory",
          "note": "Short persistence = transient state visited briefly; long persistence = stable attractor state"
        },
        {
          "field_a_term": "Vietoris-Rips complex built from pairwise patient distances",
          "field_b_term": "Patient similarity network derived from clinical/omics data",
          "note": "The simplicial complex encodes all multi-patient relationships, not just pairwise ones"
        },
        {
          "field_a_term": "Mapper algorithm — topological skeleton of high-D point cloud",
          "field_b_term": "Disease subtype discovery: topological flares = novel subtypes",
          "note": "Nicolau et al. (2011) found a survival-predictive breast cancer subtype invisible to clustering"
        },
        {
          "field_a_term": "Wasserstein distance between persistence diagrams",
          "field_b_term": "Topological dissimilarity between two patient cohorts or disease conditions",
          "note": "Provides a metric on the space of disease topologies robust to small data perturbations"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.1102826108",
          "note": "Nicolau, Levine & Carlsson (2011) PNAS — \"Topology based data analysis identifies a subgroup of breast cancers with a unique mutational profile and excellent survival\"; landmark application of Mapper to cancer genomics\n"
        },
        {
          "doi": "10.1090/S0273-0979-09-01249-X",
          "note": "Carlsson (2009) Bull AMS 46:255 — \"Topology and data\"; foundational review of persistent homology and Mapper for applied mathematics; accessible introduction to the framework\n"
        },
        {
          "doi": "10.1038/srep01236",
          "note": "Lum et al. (2013) Sci Rep — \"Extracting insights from the shape of complex data using topology\"; Mapper applied to basketball player performance, diabetes progression, and House of Representatives voting data\n"
        },
        {
          "doi": "10.1371/journal.pcbi.1004374",
          "note": "Camara et al. (2016) PLOS Comput Biol — \"Topological data analysis generates high-resolution, genome-wide maps of human recombination\"; TDA for genomic structure\n"
        },
        {
          "doi": "10.2312/SPBG/SPBG07/091-100",
          "note": "Singh, Mémoli & Carlsson (2007) Eurographics Symp on Point-Based Graphics — \"Topological methods for the analysis of high dimensional data sets and 3D object recognition\"; original Mapper algorithm paper\n"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/mathematics-medicine/b-topology-disease-progression.yaml"
    },
    {
      "id": "b-mycelial-networks-minimum-spanning-trees",
      "title": "Mycelial transport networks of wood-decay fungi grow to topologies that approximate minimum spanning trees (MST) connecting nutrient sources while also maintaining fault-tolerant looping edges, exhibiting the same trade-off between cost and resilience that optimal network design theory predicts and that is observed in slime mold and mammalian vascular networks",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Mycelial networks are self-organized physical graphs connecting resource nodes; their Steiner-tree-like minimization of total hyphal length subject to transport efficiency constraints produces topologies that score within 5% of the MST cost while adding redundant loops for resilience; the effective ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-mycelial-network-mst-approximation"
      ],
      "communication_gap": "Mycologists study fungal network growth and physiology while network scientists analyze graph topology and optimization; the quantitative connection was made by Fricker et al. and Bebber et al. in the 2000s–2010s but is not yet standard in mycology curricula or in practical network design engineering.",
      "translation_table": [
        {
          "field_a_term": "hyphal anastomosis forming mycelial loops (mycology)",
          "field_b_term": "redundant non-tree edges added to a minimum spanning tree for fault tolerance (mathematics)",
          "note": "Loop formation adds graph cycles that provide alternative paths; cost is extra hyphal material vs. MST"
        },
        {
          "field_a_term": "nutrient transport flux through hyphal cord network (mycology)",
          "field_b_term": "electrical current flow in resistor network (Kirchhoff's law) (mathematics)",
          "note": "Hyphal tubes act as resistors; driving pressure replaces voltage; flux proportional to pressure drop per conductance"
        },
        {
          "field_a_term": "mycelial network rewiring after severing (mycology)",
          "field_b_term": "network rerouting after edge deletion in a resilient graph (mathematics)",
          "note": "Fungi dynamically reroute transport through remaining connections; same as k-connectivity resilience analysis"
        },
        {
          "field_a_term": "foraging front vs. established cord network (mycology)",
          "field_b_term": "exploration vs. exploitation trade-off in network growth (mathematics)",
          "note": "Fine exploratory hyphae form a random graph that is pruned to a near-MST cord network upon resource discovery"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rspb.2007.0459",
          "note": "Bebber et al. (2007) Proc R Soc B 274:2307 - biological solutions to transport network design (Steiner tree MST analysis)"
        },
        {
          "doi": "10.1126/science.1177894",
          "note": "Tero et al. (2010) Science 327:439 - rules for biologically inspired adaptive network design (slime mold MST parallel)"
        },
        {
          "doi": "10.1016/j.fgb.2007.02.013",
          "note": "Fricker et al. (2007) - electrical network analysis of mycelial transport: modeling"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-mycology/b-mycelial-networks-minimum-spanning-trees.yaml"
    },
    {
      "id": "b-bayesian-brain-predictive-coding",
      "title": "Friston's free energy principle — the brain as a hierarchical generative model minimising variational free energy F = KL[q(θ)||p(θ|data)] ≥ −log p(data) — unifies Bayesian inference, predictive coding, perception, action, and attention as gradient descent on surprise, with clinical implications for hallucination and schizophrenia as precision-weighting failures.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The predictive coding framework (Rao & Ballard 1999) proposes that cortical processing is bidirectional: top-down connections carry predictions x̂_L = f(x_{L+1}) from higher to lower levels, while bottom-up connections carry only prediction errors ε_L = x_L − x̂_L. The hierarchical generative model ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-precision-weighting-schizophrenia-nmda-receptor"
      ],
      "communication_gap": "The predictive coding / free energy principle literature is concentrated in a small community around Friston (UCL) and is sometimes criticised as unfalsifiable (too many free parameters). Computational neuroscientists (who use Bayesian models but not FEP) and experimental neuroscientists (who measure neural activity but distrust abstract frameworks) rarely engage deeply with FEP formalism. Statisticians familiar with variational inference rarely read neuroscience journals. The clinical psychiatry implications require simultaneous knowledge of psychopharmacology, computational psychiatry, and Bayesian statistics — crossing five separate fields.\n",
      "translation_table": [
        {
          "field_a_term": "variational free energy F = KL[q||p] − log p(data)",
          "field_b_term": "brain's implicit cost function — quantity neural activity minimises"
        },
        {
          "field_a_term": "generative model p(data|θ) (probabilistic model of sensory causes)",
          "field_b_term": "hierarchical cortical predictive model (top-down connections)"
        },
        {
          "field_a_term": "prediction error ε_L = x_L − x̂_L",
          "field_b_term": "superficial pyramidal cell activity (bottom-up \"error units\")"
        },
        {
          "field_a_term": "precision matrix Π_L = Σ_L^{−1} (inverse covariance)",
          "field_b_term": "attentional gain — neural precision-weighting of prediction errors"
        },
        {
          "field_a_term": "variational Bayes update rule (gradient descent on free energy)",
          "field_b_term": "synaptic plasticity (Hebbian-like learning updating generative model)"
        },
        {
          "field_a_term": "active inference (action minimises expected free energy)",
          "field_b_term": "motor control driven by predicted sensory states, not error"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nn0199_79",
          "note": "Rao & Ballard (1999) Nat Neurosci 2:79 — predictive coding in the visual cortex"
        },
        {
          "doi": "10.1098/rstb.2005.1622",
          "note": "Friston (2005) Philos Trans R Soc B 360:815 — theory of cortical responses"
        },
        {
          "doi": "10.1017/S0140525X12000477",
          "note": "Clark (2013) Behav Brain Sci 36:181 — whatever next? Predictive brains, situated agents"
        },
        {
          "doi": "10.1016/j.neubiorev.2016.10.012",
          "note": "Friston et al. (2017) Neurosci Biobehav Rev 77:388 — active inference, curiosity and insight"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-neuroscience/b-bayesian-brain-predictive-coding.yaml"
    },
    {
      "id": "b-dynamical-systems-neural-oscillations",
      "title": "Nonlinear dynamical systems theory ↔ neural oscillations and brain rhythms — bifurcations at cognitive boundaries",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Neural populations exhibit characteristic oscillations (alpha 8-12 Hz, gamma 30-80 Hz, theta 4-8 Hz, beta 12-30 Hz) whose emergence, frequency, and amplitude are governed by the bifurcation structure of the underlying nonlinear dynamical system. The Wilson-Cowan equations (1972) describe coupled exc",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-seizure-bifurcation-early-warning-signal"
      ],
      "communication_gap": "Wilson and Cowan were neuroscientists using mathematics, but the dynamical systems language became dominant in computational neuroscience only in the 1990s (Rinzel, Ermentrout). Neurologists studying seizures rarely use bifurcation theory despite its predictive power. The 'neuro-dynamicists' and clinical neurologists publish in separate journals. Mathematicians with expertise in nonlinear dynamics rarely have neuroscience collaborators and vice versa.\n",
      "translation_table": [
        {
          "field_a_term": "fixed point (stable equilibrium)",
          "field_b_term": "tonic neural firing rate / resting state",
          "note": "The brain at rest or under general anaesthesia at a dynamical fixed point"
        },
        {
          "field_a_term": "limit cycle",
          "field_b_term": "sustained neural oscillation (gamma, theta, alpha)",
          "note": "Periodic orbit in E-I phase space; frequency determined by eigenvalues at the Hopf bifurcation"
        },
        {
          "field_a_term": "Hopf bifurcation (stable fixed point → limit cycle)",
          "field_b_term": "oscillation onset as excitation increases",
          "note": "Critical slowing down predicts the gamma oscillation onset threshold"
        },
        {
          "field_a_term": "bifurcation parameter (excitation/inhibition ratio E/I)",
          "field_b_term": "neuromodulator levels (dopamine, acetylcholine, GABA)",
          "note": "Pharmacological interventions shift the E/I ratio and the bifurcation structure"
        },
        {
          "field_a_term": "chaotic attractor (sensitivity to initial conditions)",
          "field_b_term": "irregular neural firing / cognitive flexibility",
          "note": "Moderate chaos may be functionally beneficial — 'edge of chaos' hypothesis"
        },
        {
          "field_a_term": "phase locking / synchronisation between oscillators",
          "field_b_term": "neural synchrony (gamma synchronisation in visual binding)",
          "note": "Kuramoto model predicts synchronisation threshold in large neural populations"
        }
      ],
      "references": [
        {
          "doi": "10.1016/S0006-3495(72)86068-5",
          "note": "Wilson & Cowan (1972) Biophys J 12:1 — excitatory-inhibitory neural population equations"
        },
        {
          "doi": "10.1073/pnas.81.10.3088",
          "note": "Hopfield (1984) PNAS 81:3088 — neurons with graded response have collective computational properties like those of two-state neurons"
        },
        {
          "note": "Strogatz (1994) Nonlinear Dynamics and Chaos. Westview Press. — standard textbook",
          "url": "https://www.routledge.com/Nonlinear-Dynamics-and-Chaos/Strogatz/p/book/9780813349107"
        },
        {
          "note": "Buzsáki (2006) Rhythms of the Brain. Oxford University Press.",
          "doi": "10.1093/acprof:oso/9780195301069.001.0001"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-neuroscience/b-dynamical-systems-neural-oscillations.yaml"
    },
    {
      "id": "b-grid-cells-hexagonal-lattice-fourier",
      "title": "Grid cells in the medial entorhinal cortex fire at positions forming a triangular (hexagonal) lattice across an environment, and this spatial firing pattern is mathematically equivalent to a superposition of three plane waves at 60-degree angles — identical to the lowest Fourier basis functions on a hexagonal lattice — providing a neural coordinate system whose algebraic properties enable path integration by vector addition in a periodic latent space",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A grid cell's spatial firing field r(x) = sum_{j=1}^{3} cos(k_j . x + phi_j) where k_j are three wave vectors at 60-degree angles with magnitude 2pi/lambda (lambda = grid spacing); this three-wave superposition is precisely the real-valued representation of the first irreducible representations of t",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-grid-cell-torus-manifold-decoding"
      ],
      "communication_gap": "Neuroscientists studying grid cells focus on single-cell properties and behavioral correlates while mathematicians studying periodic lattices rarely engage with neural data; the Fourier/lattice connection was made explicit by Moser's group and by Yoon et al. (2013) but algebraic topology methods (torus manifold analysis) entered the field only recently via the Chaudhuri-Fiete papers.",
      "translation_table": [
        {
          "field_a_term": "grid cell firing rate map r(x,y) (neuroscience)",
          "field_b_term": "sum of three plane waves on hexagonal lattice / discrete Fourier basis (mathematics)",
          "note": "Grid field = sum of 3 cosines at 60-degree angles; equals the projection onto the 3 lowest hexagonal-lattice Fourier modes"
        },
        {
          "field_a_term": "grid scale lambda (spacing between grid fields) (neuroscience)",
          "field_b_term": "fundamental period of hexagonal lattice / reciprocal lattice spacing (mathematics)",
          "note": "Multiple grid scales form a multi-resolution code analogous to a Fourier series with different wavenumber octaves"
        },
        {
          "field_a_term": "grid cell phase (position of grid fields within environment) (neuroscience)",
          "field_b_term": "phase shift of the Fourier representation / lattice origin (mathematics)",
          "note": "Grid phase encodes the origin of the coordinate grid; population coding via distributed phases enables unique positions"
        },
        {
          "field_a_term": "toroidal neural manifold of grid cell population (neuroscience)",
          "field_b_term": "dual torus of the hexagonal lattice (quotient space R^2 / Lambda) (mathematics)",
          "note": "Population activity lies on a torus because each grid module is periodic; torus topology enables modular arithmetic path integration"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature03721",
          "note": "Hafting et al. (2005) - microstructure of a spatial map in the entorhinal cortex (discovery of grid cells)"
        },
        {
          "doi": "10.1038/nature11692",
          "note": "Yoon et al. (2013) - specific evidence of low-dimensional continuous attractor dynamics in grid cells"
        },
        {
          "doi": "10.7554/eLife.08701",
          "note": "Doeller et al. (2016) - hexadirectional modulation of theta power: 60-degree periodicity in humans"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-neuroscience/b-grid-cells-hexagonal-lattice-fourier.yaml"
    },
    {
      "id": "b-population-vector-motor-cortex",
      "title": "Motor cortex population vectors (Georgopoulos 1986) show that cosine-tuned neurons linearly encode movement direction in a distributed representation, neural trajectories rotate through a low-dimensional manifold before movement onset (Churchland 2012), and these insights directly enable BCI decoding by linear population readout.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Georgopoulos et al. (1986) recorded from individual M1 neurons during 8-direction arm reaching tasks and found broad directional tuning: r(θ) = r₀ + r_max·cos(θ - θᵢ), where θᵢ is each neuron's preferred direction. The population vector P(θ) = Σᵢ rᵢ(θ)·ĉᵢ (weighted sum of unit preferred direction ve",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-motor-cortex-rotational-dynamics-initial-condition-mechanism"
      ],
      "communication_gap": "Motor physiology and mathematical neuroscience use different vocabularies: physiologists describe tuning curves and firing rates while mathematicians describe dynamical systems and manifolds. The BCI engineering community is aware of the population vector but typically does not engage with the dynamical systems theory. Theoretical neuroscientists who study manifolds and dynamics rarely work with clinical BCI patients.\n",
      "translation_table": [
        {
          "field_a_term": "population vector P = Σᵢ rᵢĉᵢ (linear weighted sum)",
          "field_b_term": "decoded movement direction from motor cortex ensemble activity",
          "note": "The population vector averages out noise in individual neurons; accuracy grows as √N with neuron count"
        },
        {
          "field_a_term": "neural manifold (low-dimensional subspace of high-D activity)",
          "field_b_term": "M1 preparatory and movement state space (~10D out of 200+ neurons)",
          "note": "Manifold hypothesis: neural computation is constrained to a low-dimensional surface by recurrent connectivity structure"
        },
        {
          "field_a_term": "rotational dynamics (skew-symmetric A matrix)",
          "field_b_term": "preparatory activity in M1 before movement onset",
          "note": "Rotation generates the temporal patterning of muscle activation from a simple initial condition"
        },
        {
          "field_a_term": "linear decoder (Kalman filter)",
          "field_b_term": "BCI spike-to-cursor or spike-to-prosthetic arm decoding",
          "note": "Population vector readout principle enables real-time neural decoding; Kalman filter adds temporal smoothing"
        },
        {
          "field_a_term": "cosine tuning function r(θ) = r₀ + r_max·cos(θ - θᵢ)",
          "field_b_term": "directional selectivity of motor cortex neurons (preferred direction, tuning breadth)",
          "note": "Cosine tuning is optimal for a population code that represents all directions uniformly"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.3749885",
          "note": "Georgopoulos et al. (1986) Science 233:1416 — neuronal population coding of movement direction in motor cortex"
        },
        {
          "doi": "10.1152/jn.1994.72.1.309",
          "note": "Schwartz (1994) J Neurophysiol 72:609 — direct cortical representation of drawing; continuous population vector"
        },
        {
          "doi": "10.1038/nn.3643",
          "note": "Cunningham & Yu (2014) Nat Neurosci 17:1500 — dimensionality reduction for large-scale neural recordings"
        },
        {
          "doi": "10.1038/nature11129",
          "note": "Churchland et al. (2012) Nature 487:51 — neural population dynamics during reaching; rotational structure"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-neuroscience/b-population-vector-motor-cortex.yaml"
    },
    {
      "id": "b-reinforcement-learning-dopamine",
      "title": "The temporal difference (TD) prediction error δ_t = r_t + γV(s_{t+1}) − V(s_t) in reinforcement learning is exactly implemented by dopaminergic neurons in the ventral tegmental area — firing rates encode δ: burst on positive surprise, pause on negative surprise, silence on accurate prediction — the tightest known neuroscience-AI correspondence.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Temporal difference (TD) learning (Sutton 1988; Sutton & Barto 1998) defines the prediction error: δ_t = r_t + γV(s_{t+1}) − V(s_t), where r_t is the reward received, γ ∈ (0,1) is the discount factor, and V(s) is the estimated value function (expected future discounted reward). The value function is",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-td-prediction-error-dopamine-burst-identity-schultz"
      ],
      "communication_gap": "The TD-dopamine identity was established in 1997, but the two communities (RL theory and systems neuroscience) continue to evolve largely independently. Computer science RL research rarely cites the neuroscience beyond the Schultz 1997 paper; neuroscientists rarely read the ICML/NeurIPS literature. The distributional RL-dopamine connection (Dabney 2020) required direct collaboration between DeepMind and neuroscientists — exceptional rather than routine.\n",
      "translation_table": [
        {
          "field_a_term": "TD prediction error δ_t = r_t + γV(s_{t+1}) − V(s_t)",
          "field_b_term": "dopamine neuron firing rate deviation from baseline",
          "note": "Burst = δ > 0; pause = δ < 0; no change = δ = 0 (prediction correct)"
        },
        {
          "field_a_term": "value function V(s) = E[Σ γᵏ r_{t+k}]",
          "field_b_term": "subjective expected value of a state",
          "note": "Represented in striatum and OFC; updated via dopaminergic TD signal"
        },
        {
          "field_a_term": "discount factor γ ∈ (0,1)",
          "field_b_term": "temporal discounting of future rewards (hyperbolic discounting in humans ≈ 1/(1+kd))",
          "note": "Humans show hyperbolic discounting; TD uses exponential — a known discrepancy"
        },
        {
          "field_a_term": "Q(s,a) action-value function",
          "field_b_term": "striatal action-value representation (medium spiny neuron activity)",
          "note": "D1-MSNs: direct pathway (Go, δ > 0 → LTP); D2-MSNs: indirect (NoGo, δ < 0 → LTD)"
        },
        {
          "field_a_term": "policy update via TD error (actor-critic)",
          "field_b_term": "dopamine-modulated synaptic plasticity in striatum",
          "note": "DA release → D1/D2 receptor activation → LTP/LTD → policy change"
        },
        {
          "field_a_term": "distributional RL (quantile regression)",
          "field_b_term": "heterogeneous dopamine neuron responses encoding different return quantiles",
          "note": "Dabney et al. (2020) Nature — different DA neurons encode optimistic/pessimistic predictions"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.275.5306.1593",
          "note": "Schultz, Dayan & Montague (1997) Science 275:1593 — dopamine as TD prediction error"
        },
        {
          "doi": "10.1523/JNEUROSCI.16-05-01936.1996",
          "note": "Montague, Dayan & Sejnowski (1996) J Neurosci 16:1936 — predictive Hebbian learning"
        },
        {
          "note": "Sutton & Barto (2018) Reinforcement Learning: An Introduction. 2nd ed. MIT Press."
        },
        {
          "doi": "10.1038/nature14236",
          "note": "Mnih et al. (2015) Nature 518:529 — Deep Q-Network (DQN) superhuman Atari performance"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-neuroscience/b-reinforcement-learning-dopamine.yaml"
    },
    {
      "id": "b-spectral-graph-theory-connectome",
      "title": "Graph Laplacian eigenmodes of the structural connectome define the brain's harmonic resonances — resting-state fMRI networks align with low-frequency Laplacian eigenvectors, bridging spectral graph theory to systems neuroscience.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The graph Laplacian L = D − A (D = degree matrix, A = adjacency matrix) encodes all structural connectivity of a network. Its spectral decomposition Lψ_k = λ_k ψ_k produces eigenmodes ψ_k ordered by spatial frequency: ψ_1 (constant, λ₁ = 0) is the DC mode; ψ_2 (algebraic connectivity, λ₂ = Fiedler v",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-structural-eigenmode-functional-network-correspondence"
      ],
      "communication_gap": "Spectral graph theory is standard in algebraic combinatorics and network science but was not applied to neural connectomes until Atasoy et al. (2016). Neuroscientists trained in connectivity analysis (DTI, resting-state fMRI) do not routinely compute Laplacian eigendecompositions. The mathematical concept of \"graph harmonics\" (analogous to Fourier modes on a lattice) was not in the neuroscience vocabulary until this bridge was explicitly drawn.\n",
      "translation_table": [
        {
          "field_a_term": "graph Laplacian L = D − A",
          "field_b_term": "structural connectivity matrix of the neural connectome"
        },
        {
          "field_a_term": "eigenvalue λ_k (Laplacian spectrum)",
          "field_b_term": "spatial frequency of the k-th brain harmonic mode"
        },
        {
          "field_a_term": "eigenvector ψ_k (Laplacian eigenmode)",
          "field_b_term": "brain activity pattern at frequency k (structural harmonic)"
        },
        {
          "field_a_term": "algebraic connectivity λ₂ (Fiedler value)",
          "field_b_term": "synchronization threshold — minimum coupling for global coherence"
        },
        {
          "field_a_term": "graph partitioning (Fiedler vector bisection)",
          "field_b_term": "hemispheric or large-scale brain network segregation"
        },
        {
          "field_a_term": "spectral gap (λ₂/λ_max)",
          "field_b_term": "balance between integration and segregation in brain networks"
        },
        {
          "field_a_term": "heat kernel exp(−tL) (diffusion on graph)",
          "field_b_term": "propagation of activity along structural white matter paths"
        }
      ],
      "references": [
        {
          "note": "Chung (1997) Spectral Graph Theory. American Mathematical Society"
        },
        {
          "doi": "10.1038/ncomms10340",
          "note": "Atasoy et al. (2016) Human brain networks function in connectome-specific harmonic waves. Nat Commun 7:10340"
        },
        {
          "doi": "10.1016/j.neuroimage.2013.09.033",
          "note": "Abdelnour et al. (2014) Network diffusion accurately models the relationship between structural and functional connectivity. NeuroImage 90:335"
        },
        {
          "doi": "10.1016/j.neuroimage.2016.04.036",
          "note": "Robinson et al. (2016) Eigenmodes of brain activity. NeuroImage 142:79"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-neuroscience/b-spectral-graph-theory-connectome.yaml"
    },
    {
      "id": "b-algebraic-topology-defect-theory",
      "title": "Algebraic Topology and Defect Theory — homotopy group classification of topological defects in ordered media unifies nematic disclinations, superfluid vortices, magnetic monopoles, and cosmic strings",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Topological defects are singularities in the order parameter field of a system with spontaneous symmetry breaking. Their stability and classification are determined by the topology of the order parameter space M, specifically its homotopy groups πₙ(M). Line defects (in 3D) are classified by π₁(M) — ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Algebraic topology is a branch of pure mathematics studied by mathematicians; its application to defect classification in condensed matter physics is known to specialists in soft matter and liquid crystal physics (Mermin's 1979 Rev Mod Phys review is the bridge document) but not to most biophysicists, cosmologists, or materials scientists who encounter topological defects in their work. The Kibble–Zurek connection between cosmology and laboratory condensed matter is conceptually beautiful but remains confined to the specialist literature despite being accessible to any graduate-level physicist.\n",
      "translation_table": [
        {
          "field_a_term": "fundamental group pi_1(M) of order parameter space",
          "field_b_term": "classification of stable line defects in ordered medium",
          "note": "Non-trivial pi_1 elements correspond to stable defects; group structure governs defect combination rules"
        },
        {
          "field_a_term": "pi_1(RP^2) = Z_2 (nematic)",
          "field_b_term": "only one type of half-integer disclination in nematic liquid crystal",
          "note": "Two ½-disclinations can annihilate; integer disclinations are unstable and escape to third dimension"
        },
        {
          "field_a_term": "pi_1(S^1) = Z (superfluid)",
          "field_b_term": "integer-quantised vortex lines in superfluid He or BEC",
          "note": "Each vortex carries circulation h/m; quantisation enforced by single-valuedness of wavefunction"
        },
        {
          "field_a_term": "pi_2(S^2) = Z (magnetic monopole)",
          "field_b_term": "topological classification of hedgehog point defects",
          "note": "GUT monopoles are point defects; Kibble mechanism predicts one per causal horizon volume at phase transition"
        },
        {
          "field_a_term": "Kibble-Zurek mechanism (defect density ~ xi^{-d})",
          "field_b_term": "universal relationship between quench rate and defect density",
          "note": "Applied in lab: quench superfluid He or BEC and count vortices; confirms the cosmological prediction in table-top experiment"
        },
        {
          "field_a_term": "homotopy group non-commutativity (non-abelian pi_1)",
          "field_b_term": "non-trivial defect combination rules (crossing defects may entangle)",
          "note": "Biaxial nematics: pi_1 = quaternion group Q_8 (non-abelian) → defect lines can become linked or unknotted depending on order of crossing"
        }
      ],
      "references": [
        {
          "doi": "10.1103/RevModPhys.51.591",
          "note": "Mermin (1979) Rev Mod Phys 51:591 — topological theory of defects"
        },
        {
          "doi": "10.1088/0305-4470/9/8/029",
          "note": "Kibble (1976) J Phys A 9:1387 — topology of cosmic domains and strings"
        },
        {
          "doi": "10.1080/00018738200101438",
          "note": "Trebin (1982) Adv Phys 31:195 — topology of liquid crystal defects"
        },
        {
          "note": "Nakahara (2003) Geometry, Topology and Physics — textbook on homotopy and fibre bundles"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-physics/b-algebraic-topology-defect-theory.yaml"
    },
    {
      "id": "b-catastrophe-theory-phase-transitions",
      "title": "Thom's catastrophe theory classifies generic singularities of smooth potential functions by codimension, providing a rigorous topological description of all possible sudden qualitative changes — the same mathematics governs fold bifurcations in dynamical systems and first-order phase transitions in Landau free energy theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Thom's catastrophe theory classifies the seven elementary catastrophes by codimension. The fold (codimension 1): V(x) = x³/3 - ux, bifurcation at u=0 where one stable state splits into two. The cusp (codimension 2): V(x) = x⁴/4 + ux²/2 + vx gives a surface of equilibria with a fold curve bounding th",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-catastrophe-theory-first-order-transitions"
      ],
      "communication_gap": "Catastrophe theory reached peak physics interest in the 1970s–80s but was dismissed in some communities due to overapplication by Zeeman to qualitative social phenomena. This created a cultural separation between the mathematically rigorous classification (universally accepted) and its applications, causing physicists to reinvent equivalent constructions without citing Thom's framework. Topology and dynamical-systems communities publish in different journals.\n",
      "translation_table": [
        {
          "field_a_term": "catastrophe codimension",
          "field_b_term": "number of thermodynamic control parameters at a multicritical point",
          "note": "codimension equals the number of independent tuning parameters needed to reach the singularity"
        },
        {
          "field_a_term": "fold catastrophe potential V(x) = x³/3 - ux",
          "field_b_term": "spinodal instability in a first-order transition",
          "note": "the fold bifurcation corresponds to the limit of metastability (spinodal)"
        },
        {
          "field_a_term": "cusp catastrophe hysteresis loop",
          "field_b_term": "first-order transition with latent heat and phase coexistence",
          "note": "the cusp separates the monostable and bistable regions of parameter space"
        },
        {
          "field_a_term": "butterfly catastrophe tricritical structure",
          "field_b_term": "tricritical point where second-order becomes first-order (He-3/He-4 mixtures)",
          "note": "requires 4-dimensional parameter control; tricritical exponents differ from Ising"
        },
        {
          "field_a_term": "gradient system ẋ = -dV/dx",
          "field_b_term": "overdamped relaxational dynamics near a phase transition",
          "note": "both describe irreversible approach to the nearest attractor"
        }
      ],
      "references": [
        {
          "note": "Thom (1975) Structural Stability and Morphogenesis; Benjamin, Reading MA"
        },
        {
          "note": "Zeeman (1976) Catastrophe theory. Sci Am 234:65–83"
        },
        {
          "doi": "10.1070/RM1975v030n05ABEH001521",
          "note": "Arnold (1975) Critical points of smooth functions and their normal forms. Russ Math Surv 30:1–75"
        },
        {
          "note": "Gilmore (1981) Catastrophe Theory for Scientists and Engineers; Wiley, New York"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-physics/b-catastrophe-theory-phase-transitions.yaml"
    },
    {
      "id": "b-chaos-theory-strange-attractors",
      "title": "Chaos theory bridges mathematics and physics: deterministic nonlinear systems (Lorenz equations, logistic map) exhibit sensitive dependence on initial conditions (positive Lyapunov exponents), universal period-doubling routes to chaos (Feigenbaum constant δ ≈ 4.669), and strange attractors with fractal geometry — connecting topology, dynamical systems theory, and atmospheric physics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A deterministic dynamical system exhibits chaos if and only if it satisfies: (1) Sensitive dependence on initial conditions: nearby trajectories diverge exponentially, quantified by the largest Lyapunov exponent λ₁ > 0 — the e-folding time for error growth is τ = 1/λ₁. (2) Topological transitivity: ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-feigenbaum-universality-quantum-maps-period-doubling"
      ],
      "communication_gap": "Lorenz (1963) published in the Journal of Atmospheric Sciences — a meteorology journal — where his paper was largely ignored by mathematicians until Ruelle and Takens (1971) independently formalized strange attractors in a pure mathematics paper. Feigenbaum (1978) published in the Journal of Statistical Physics, where the RG connection was made by physicists. Experimental chaos (Libchaber's Rayleigh-Bénard experiments confirming Feigenbaum universality) was done by a physicist. The OGY chaos control paper was in Physical Review Letters. Despite the mathematical foundation being rigorous (Anosov, Smale, Bowen), chaos theory is primarily published in physics and applied mathematics venues, with limited integration into standard pure mathematics curricula.\n",
      "translation_table": [
        {
          "field_a_term": "Lyapunov exponent λ (rate of divergence of nearby trajectories)",
          "field_b_term": "predictability horizon τ = 1/λ (maximum useful forecast time)",
          "note": "for Lorenz atmosphere model λ ≈ 1/5 day⁻¹; prediction horizon ≈ 5 days — matches NWP limits"
        },
        {
          "field_a_term": "strange attractor (fractal set, non-integer Hausdorff dimension)",
          "field_b_term": "climate attractor (phase space of long-term atmospheric variability)",
          "note": "climate variability is the invariant measure on the atmospheric attractor — statistics are predictable even though trajectories are not"
        },
        {
          "field_a_term": "Feigenbaum constant δ ≈ 4.669 (universal period-doubling ratio)",
          "field_b_term": "critical exponents in statistical mechanics (universal near phase transitions)",
          "note": "both arise from renormalization group fixed points — chaos universality is RG universality"
        },
        {
          "field_a_term": "unstable periodic orbit (UPO) embedded in attractor",
          "field_b_term": "resonant frequency (target state for chaos control / synchronization)",
          "note": "OGY chaos control stabilizes a UPO — the target periodic orbit is intrinsic to the attractor"
        },
        {
          "field_a_term": "KAM tori (quasi-periodic orbits in Hamiltonian systems — KAM theorem)",
          "field_b_term": "regular regions in phase space (islands of stability in chaotic sea)",
          "note": "KAM theorem gives conditions for persistence of quasi-periodic orbits under perturbation"
        }
      ],
      "references": [
        {
          "doi": "10.1175/1520-0469(1963)020<0130:DNF>2.0.CO;2",
          "note": "Lorenz (1963) Deterministic nonperiodic flow; J Atmos Sci 20:130"
        },
        {
          "doi": "10.1038/261459a0",
          "note": "May (1976) Simple mathematical models with very complicated dynamics; Nature 261:459"
        },
        {
          "doi": "10.1007/BF01107494",
          "note": "Feigenbaum (1978) Quantitative universality for a class of nonlinear transformations; J Stat Phys 19:25"
        },
        {
          "doi": "10.1103/PhysRevLett.64.1196",
          "note": "Ott, Grebogi & Yorke (1990) Controlling chaos; Phys Rev Lett 64:1196"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-physics/b-chaos-theory-strange-attractors.yaml"
    },
    {
      "id": "b-differential-forms-maxwell",
      "title": "Maxwell's equations expressed in differential form notation — dF = 0 and d*F = J — reveal that classical electromagnetism is a U(1) gauge theory, the Aharonov-Bohm effect is a purely topological phenomenon, and Chern-Weil theory connects curvature forms to topological invariants, unifying differential geometry with physics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Maxwell's equations in classical vector notation (div B = 0, curl E = -dB/dt, div D = rho, curl H = J + dD/dt) are rewritten in the language of differential forms on 4-dimensional spacetime as two equations: dF = 0 and d*F = J, where F is the electromagnetic field strength 2-form, *F is its Hodge du",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-chern-simons-theory-topological-quantum-computation"
      ],
      "communication_gap": "The differential forms approach to electromagnetism is standard in mathematical physics (Misner-Thorne-Wheeler, Nakahara) but is rarely taught in undergraduate physics courses that use Jackson (classical vector notation). Mathematics graduate students learning differential geometry do not typically see the Maxwell equations example until they encounter gauge theory. The Aharonov-Bohm effect, though over 60 years old, is still not in most undergraduate physics curricula despite being one of the cleanest demonstrations that topology enters physics directly.\n",
      "translation_table": [
        {
          "field_a_term": "differential 2-form F = dA",
          "field_b_term": "electromagnetic field tensor F_{mu nu} = partial_mu A_nu - partial_nu A_mu",
          "note": "Form language makes coordinate independence manifest; tensor components are frame-dependent"
        },
        {
          "field_a_term": "Bianchi identity dF = 0",
          "field_b_term": "no magnetic monopoles (div B = 0) + Faraday's law (curl E = -dB/dt)",
          "note": "Two Maxwell equations unified into one topological identity"
        },
        {
          "field_a_term": "Hodge dual *F",
          "field_b_term": "electric-magnetic duality (E <-> B under *F in vacuum)",
          "note": "Hodge dual exchanges field strength with its dual — duality symmetry of vacuum Maxwell equations"
        },
        {
          "field_a_term": "Aharonov-Bohm phase (e/hbar) ∮ A·dl",
          "field_b_term": "holonomy of the U(1) connection around a loop — topological invariant",
          "note": "Phase is a Wilson loop — fundamental observable in gauge theory"
        },
        {
          "field_a_term": "first Chern class c1 in H²(M, Z)",
          "field_b_term": "magnetic monopole charge (Dirac quantization condition)",
          "note": "Topology quantizes charge — Dirac's heuristic derivation follows from c1 being integral"
        }
      ],
      "references": [
        {
          "note": "Misner, Thorne & Wheeler (1973) Gravitation. W.H. Freeman — chapters on differential forms"
        },
        {
          "doi": "10.1103/PhysRev.115.485",
          "note": "Aharonov & Bohm (1959) Phys Rev 115:485 — topological phase effect"
        },
        {
          "note": "Nakahara (2003) Geometry, Topology and Physics. IOP Publishing"
        },
        {
          "note": "Frankel (2012) The Geometry of Physics. Cambridge University Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-physics/b-differential-forms-maxwell.yaml"
    },
    {
      "id": "b-ergodic-theory-statistical-mechanics",
      "title": "Birkhoff's ergodic theorem guarantees that time averages equal ensemble averages for measure-preserving dynamical systems, directly justifying Gibbs's statistical mechanics; the KAM theorem identifies the subset of Hamiltonian systems that break ergodicity by preserving invariant tori, explaining why some quantum systems thermalise and others localise.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Boltzmann's ergodic hypothesis (1884) conjectured that a gas molecule would, over infinite time, visit every point on the constant-energy hypersurface in phase space — making the time average of any observable equal to its phase-space average (ensemble average). This is the foundation of statistical",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-kam-nonergodicity-many-body-localization"
      ],
      "communication_gap": "Ergodic theory developed as pure mathematics in measure theory (Birkhoff, von Neumann, Halmos), while statistical mechanics developed as physics (Boltzmann, Gibbs, Maxwell). The formal connection via Birkhoff's theorem was understood by mathematicians but rarely taught in physics graduate programs. KAM theory was equally siloed in mathematical dynamics and solar system mechanics. The connection to quantum thermalisation and MBL was not appreciated until the 2010s — even though physicists had known about FPUT (1955) and the classical integrability barrier for decades. The fields of ergodic theory, Hamiltonian mechanics, and condensed matter quantum physics rarely cite each other's foundational literature.\n",
      "translation_table": [
        {
          "field_a_term": "Birkhoff ergodic theorem (time average = ensemble average)",
          "field_b_term": "Gibbs statistical mechanics (justifies ensemble method for gases)"
        },
        {
          "field_a_term": "measure-preserving transformation T on phase space",
          "field_b_term": "Hamiltonian evolution of molecular system (Liouville's theorem)"
        },
        {
          "field_a_term": "ergodic system (dense orbit on energy surface)",
          "field_b_term": "thermalising quantum system (ETH-satisfying, Gibbs state)"
        },
        {
          "field_a_term": "KAM invariant torus (preserved by small perturbation)",
          "field_b_term": "many-body localised phase (LIOMs prevent thermalisation)"
        },
        {
          "field_a_term": "KAM threshold perturbation ε_c",
          "field_b_term": "MBL transition disorder strength W_c"
        },
        {
          "field_a_term": "FPUT recurrence (non-thermalising classical chain)",
          "field_b_term": "quantum scar states (non-thermalising eigenstates)"
        },
        {
          "field_a_term": "Lyapunov exponent (rate of ergodic mixing)",
          "field_b_term": "quantum chaos Lyapunov exponent (OTOC decay rate, MSS-bounded)"
        }
      ],
      "references": [
        {
          "note": "Birkhoff (1931) Proc Natl Acad Sci 17:656 — pointwise ergodic theorem",
          "doi": "10.1073/pnas.17.12.656"
        },
        {
          "note": "Kolmogorov (1954) Dokl Akad Nauk 98:527 — KAM theorem original paper"
        },
        {
          "note": "Ott (2002) Chaos in Dynamical Systems, 2nd ed. Cambridge University Press"
        },
        {
          "doi": "10.1103/RevModPhys.83.863",
          "note": "Polkovnikov et al. (2011) Rev Mod Phys 83:863 — thermalisation in isolated quantum systems, ETH review"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-physics/b-ergodic-theory-statistical-mechanics.yaml"
    },
    {
      "id": "b-fiber-bundle-gauge-field-topology",
      "title": "Yang-Mills gauge field theories are precisely the physics of connections on principal fiber bundles: the gauge potential A_μ is a connection 1-form, the field strength F_μν is its curvature 2-form, and gauge transformations are bundle automorphisms — making differential geometry and physics isomorphic descriptions of the same mathematical structure\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A gauge theory with gauge group G is mathematically identical to a principal G-bundle P over spacetime M with a connection ω: gauge potential A_μ^a maps to the connection 1-form ω in local trivialization, field strength F_μν^a maps to the curvature 2-form Ω = Dω, parallel transport of charged partic",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-principal-bundle-chern-class-anomaly-cancellation"
      ],
      "communication_gap": "Differential geometers study principal bundles and connections abstractly while physicists learn gauge theory through field theory formalism; the isomorphism between the two languages, established by Wu & Yang (1975) and elaborated by Atiyah, is known to mathematical physicists but rarely taught in standard physics or mathematics curricula.\n",
      "translation_table": [
        {
          "field_a_term": "gauge potential A_μ^a (physics)",
          "field_b_term": "connection 1-form ω on principal G-bundle (mathematics)",
          "note": "In local trivialization s: U → P, ω pulls back to A = s*ω; gauge change = change of trivialization"
        },
        {
          "field_a_term": "field strength tensor F_μν^a (physics)",
          "field_b_term": "curvature 2-form Ω = dω + ω∧ω of the connection (mathematics)",
          "note": "F_μν = ∂_μ A_ν - ∂_ν A_μ + [A_μ, A_ν] is exactly the local expression of Ω in a trivialization"
        },
        {
          "field_a_term": "gauge transformation (physics)",
          "field_b_term": "vertical automorphism of the principal bundle / change of local section (mathematics)",
          "note": "A gauge transformation g: M → G acts as A → g^(-1)Ag + g^(-1)dg, which is the transition between trivializations"
        },
        {
          "field_a_term": "Aharonov-Bohm phase (physics)",
          "field_b_term": "holonomy of the connection around a closed loop (mathematics)",
          "note": "Charged particle encircling a solenoid acquires phase = exp(i∮A·dl) = holonomy of the U(1) connection"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevD.12.3845",
          "note": "Wu & Yang (1975) - concept of nonintegrable phase factors and global formulation of gauge fields"
        },
        {
          "doi": "10.1098/rsta.1983.0017",
          "note": "Atiyah & Bott (1983) - the Yang-Mills equations over Riemann surfaces"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-physics/b-fiber-bundle-gauge-field-topology.yaml"
    },
    {
      "id": "b-fourier-analysis-wave-mechanics",
      "title": "Fourier Analysis and Wave Mechanics — decomposition of functions into sinusoidal components connects PDE solutions, signal processing, and quantum uncertainty",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Fourier transform F(ω) = ∫f(t)e^{-iωt}dt decomposes any square-integrable function into sinusoidal components, establishing a bijective correspondence between the time domain and frequency domain. Parseval's theorem asserts energy conservation: ∫|f(t)|²dt = (1/2π)∫|F(ω)|²dω — the total energy is",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Mathematicians emphasise the abstract L² Hilbert space framework and functional analysis; physicists use Fourier decomposition as a practical computational tool; engineers focus on DFT/FFT implementation. Students often learn these three perspectives in isolation, missing the unifying conceptual thread from Fourier (1822) through Shannon sampling theory to quantum field theory mode expansions.\n",
      "translation_table": [
        {
          "field_a_term": "Fourier transform F(ω) = ∫f(t)e^{-iωt}dt",
          "field_b_term": "spectral decomposition of a wave field into normal modes",
          "note": "The abstract L² isomorphism concretely corresponds to measuring a signal's frequency content"
        },
        {
          "field_a_term": "Parseval's theorem (energy conservation between domains)",
          "field_b_term": "conservation of wave energy density integrated over all frequencies",
          "note": "Unitary nature of the Fourier transform; essential for radiometry and power spectral density"
        },
        {
          "field_a_term": "uncertainty principle σ_t·σ_ω ≥ ½",
          "field_b_term": "trade-off between temporal resolution and frequency resolution",
          "note": "In quantum mechanics becomes Δx·Δp ≥ ℏ/2; in signal processing sets limits for filter design"
        },
        {
          "field_a_term": "FFT algorithm O(N log N) complexity",
          "field_b_term": "efficient numerical computation of spectral content",
          "note": "Cooley–Tukey divide-and-conquer reduces N² multiplications; foundational to all digital DSP"
        },
        {
          "field_a_term": "dispersion relation ω = ck",
          "field_b_term": "phase velocity of plane waves in a medium",
          "note": "Non-trivial dispersion (ω not proportional to k) causes wave-packet spreading (group velocity ≠ phase velocity)"
        },
        {
          "field_a_term": "Laplacian eigenfunctions (normal modes)",
          "field_b_term": "standing waves in bounded domain (instrument, waveguide, quantum well)",
          "note": "Quantisation in quantum mechanics arises from boundary conditions on ψ; same mathematics as vibrating drum"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF02649110",
          "note": "Folland & Sitaram (1997) J Fourier Anal Appl 3:207 — uncertainty principles survey"
        },
        {
          "doi": "10.1090/S0025-5718-1965-0178586-1",
          "note": "Cooley & Tukey (1965) Math Comput 19:297 — FFT algorithm"
        },
        {
          "doi": "10.1017/9781316995433",
          "note": "Griffiths (2017) Introduction to Quantum Mechanics — Fourier methods in QM"
        },
        {
          "note": "Fourier (1822) Théorie Analytique de la Chaleur — original decomposition of heat equation solutions"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-physics/b-fourier-analysis-wave-mechanics.yaml"
    },
    {
      "id": "b-gauge-theory-x-connection-forms",
      "title": "Gauge fields in physics are properly understood as connection 1-forms on principal bundles, unifying Yang–Mills intuition with differential-geometry language.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Physicists introduce gauge potentials A_μ to encode forces and charge parallel transport; mathematicians define connections on principal G-bundles that assign horizontal lifts to paths. Curvature corresponds to field strength F = dA + A∧A in non-abelian cases. The bridge makes covariant derivative, ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-gauge-fixing-parallels-coordinate-choice-in-models"
      ],
      "communication_gap": "Physics introductions often delay bundles until advanced courses; mathematics presentations may omit physical units and measurement protocols. Students duplicate effort learning the same object twice.",
      "translation_table": [
        {
          "field_a_term": "gauge potential A",
          "field_b_term": "local connection 1-form on a principal bundle"
        },
        {
          "field_a_term": "field strength F",
          "field_b_term": "curvature 2-form"
        },
        {
          "field_a_term": "gauge transformation",
          "field_b_term": "fiber automorphism / change of local trivialization"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRev.96.191",
          "note": "Yang & Mills (1954) — non-abelian gauge fields"
        },
        {
          "doi": "10.1103/PhysRevD.12.3845",
          "note": "Wu & Yang (1975) — conceptual discussion of gauge fields (representative bridge paper)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-physics/b-gauge-theory-x-connection-forms.yaml"
    },
    {
      "id": "b-geodesic-flow-billiard-ergodic-theory",
      "title": "Geodesic flow on compact surfaces of negative curvature is the archetypal chaotic dynamical system and the continuous-space analogue of billiard dynamics in polygonal tables — both are Anosov flows with the same ergodic properties, making differential geometry and discrete billiard theory two perspectives on the same ergodic universality class.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Geodesic flow on a compact Riemannian manifold of negative curvature describes a particle moving at constant speed along geodesics. In negative curvature, nearby geodesics diverge exponentially — Anosov hyperbolicity. This is the continuous analogue of a billiard ball bouncing in a stadium-shaped or",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "",
      "translation_table": [
        {
          "field_a_term": "geodesic on negatively curved surface",
          "field_b_term": "billiard trajectory in dispersing Sinai billiard",
          "note": "Both are straight-line motions bent/reflected by boundary/curvature conditions"
        },
        {
          "field_a_term": "Anosov hyperbolicity (uniform expansion/contraction)",
          "field_b_term": "exponential orbit divergence at billiard reflections",
          "note": "Both produce exponential sensitivity to initial conditions — identical ergodic properties"
        },
        {
          "field_a_term": "Liouville measure (geodesic flow invariant measure)",
          "field_b_term": "uniform measure on billiard phase space",
          "note": "The natural invariant measures that make both systems ergodic"
        },
        {
          "field_a_term": "Markov partition",
          "field_b_term": "symbolic coding of billiard trajectories",
          "note": "Both use partitions to reduce continuous dynamics to symbolic sequences"
        }
      ],
      "references": [
        {
          "doi": "10.1007/978-3-662-06796-4",
          "note": "Sinai (1976) — Introduction to Ergodic Theory; geodesic flow and billiards"
        },
        {
          "doi": "10.1007/BF02392299",
          "note": "Anosov (1967) — geodesic flows on closed Riemannian manifolds of negative curvature"
        },
        {
          "doi": "10.1103/PhysRevLett.52.1",
          "note": "Bohigas et al. (1984) — characterization of chaotic spectra and universality"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-physics/b-geodesic-flow-billiard-ergodic-theory.yaml"
    },
    {
      "id": "b-geometric-measure-minimal-surfaces",
      "title": "Geometric measure theory (currents, varifolds, Almgren regularity) provides the rigorous existence and regularity theory for minimal surfaces solving Plateau's problem, with direct physical applications to soap films, black hole event horizon area theorems, biological membrane Willmore energy minimization, and singularity analysis in nonlinear PDE.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Plateau's problem (1873): given a closed Jordan curve Γ in ℝ³, find the surface of minimum area bounded by Γ. Douglas and Radó (1931, Fields Medal to Douglas) proved existence for any Jordan curve using conformal parametrization. GMT (Federer-Fleming 1960) provides a more general framework: integer ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-willmore-energy-biological-membrane-morphogenesis-ground-state"
      ],
      "communication_gap": "GMT is taught in pure mathematics departments and required for Riemannian geometry PhD programs; it rarely appears in physics curricula despite Penrose's use of related ideas. Biological membrane physicists use Helfrich/Willmore energy without being aware of the existence/regularity theory that guarantees minimizers are well-defined. General relativists use apparent horizon finders numerically without engaging GMT regularity theory. The Almgren regularity paper (1700 pages) is famously impenetrable.\n",
      "translation_table": [
        {
          "field_a_term": "integer rectifiable current T in GMT",
          "field_b_term": "physical surface with singularities (soap film edge branching)",
          "note": "Currents allow multiplicity > 1 and handle triple junctions in soap foams"
        },
        {
          "field_a_term": "Almgren regularity theorem (singular set dimension ≤ n-8)",
          "field_b_term": "soap film triple lines are dimension 1, not dimension n-8 (physical constraint)",
          "note": "Physical soap films are more regular than GMT requires — surface tension selects special structure"
        },
        {
          "field_a_term": "monotonicity formula for minimal surfaces: Θ(r) = Area(B_r)/πr² is non-decreasing",
          "field_b_term": "Penrose singularity theorem — trapped surface implies geodesic incompleteness",
          "note": "Both use area-comparison monotonicity; Penrose's 2020 Nobel Prize based on related ideas"
        },
        {
          "field_a_term": "Willmore energy W = ∫(H² - K) dA",
          "field_b_term": "bending energy of lipid bilayer membrane (Helfrich model)",
          "note": "Helfrich (1973) derived the same functional from physical elasticity theory"
        },
        {
          "field_a_term": "varifold (generalized surface with tangent plane measure)",
          "field_b_term": "phase-field approximation of membrane at mesoscopic scale",
          "note": "Varifolds are the natural GMT objects encoding surface tilt/twist without global parametrization"
        }
      ],
      "references": [
        {
          "doi": "10.2307/1989302",
          "note": "Douglas (1931) Solution of the problem of Plateau. Trans Am Math Soc 33:263"
        },
        {
          "note": "Almgren (1966) Plateau's Problem: An Invitation to Varifold Geometry. W. A. Benjamin"
        },
        {
          "doi": "10.1002/cpa.3160340603",
          "note": "Schoen & Simon (1981) Regularity of stable minimal hypersurfaces. Commun Pure Appl Math 34:741"
        },
        {
          "note": "Federer (1969) Geometric Measure Theory. Springer Grundlehren vol. 153"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-physics/b-geometric-measure-minimal-surfaces.yaml"
    },
    {
      "id": "b-group-theory-symmetry-breaking",
      "title": "Spontaneous symmetry breaking — from ferromagnetism to the Higgs mechanism to crystal formation — is described by the mathematical framework of Lie group representations: when the ground state has symmetry H ⊂ G, the quotient G/H parametrises degenerate vacua and Goldstone's theorem counts the massless modes.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Spontaneous symmetry breaking (SSB) occurs when the ground state of a physical system has lower symmetry than its Hamiltonian. The mathematical structure is encoded in Lie group theory:\n- The system has symmetry group G (the full symmetry of the Hamiltonian). - The ground state breaks G down to a re",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-symmetry-breaking-universal-phase-transition-classifier"
      ],
      "communication_gap": "Condensed matter physicists learn SSB through the Landau order parameter and Ginzburg-Landau theory; particle physicists learn it through Lie algebra representations and gauge theories. Both communities use the same mathematics but with entirely different notations, physical intuitions, and application domains. Cross-pollination — e.g., applying the Higgs mechanism formalism to topological superconductors — is rare but extremely productive when it occurs.\n",
      "translation_table": [
        {
          "field_a_term": "symmetry group G of the Hamiltonian",
          "field_b_term": "symmetry of physical system above transition",
          "note": "G describes all transformations that leave the energy functional invariant"
        },
        {
          "field_a_term": "coset space G/H",
          "field_b_term": "manifold of degenerate ground states (order parameter space)",
          "note": "Each point in G/H is a distinct but energetically equivalent ground state"
        },
        {
          "field_a_term": "broken generators T_a ∈ Lie(G) \\ Lie(H)",
          "field_b_term": "Goldstone boson fields (magnons, phonons, pions)",
          "note": "One Goldstone mode per broken generator; massless in the absence of explicit breaking"
        },
        {
          "field_a_term": "representation of H (unbroken subgroup)",
          "field_b_term": "spectrum of the ground state (particle content)",
          "note": "Particles in the ground state transform in representations of the unbroken H"
        },
        {
          "field_a_term": "gauge symmetry + SSB (Higgs mechanism)",
          "field_b_term": "massive gauge bosons (W±, Z); Meissner effect in superconductors",
          "note": "Goldstone bosons become longitudinal polarisations of massive gauge fields"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF02812722",
          "note": "Goldstone, J. (1961). Field theories with superconductor solutions. Nuovo Cimento 19:154–164."
        },
        {
          "doi": "10.1103/PhysRev.130.439",
          "note": "Anderson, P.W. (1963). Plasmons, gauge invariance, and mass. Phys. Rev. 130:439."
        },
        {
          "note": "Weinberg, S. (1996). The Quantum Theory of Fields, Vol. 2. Cambridge University Press. -- Comprehensive treatment of SSB in quantum field theory"
        },
        {
          "note": "Coleman, S. (1985). Aspects of Symmetry. Cambridge University Press. -- Classic lectures on SSB, Goldstone theorem, and the Higgs mechanism"
        },
        {
          "doi": "10.1103/PhysRev.145.1156",
          "note": "Higgs, P.W. (1966). Spontaneous symmetry breakdown without massless bosons. Phys. Rev. 145:1156."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-physics/b-group-theory-symmetry-breaking.yaml"
    },
    {
      "id": "b-integrable-systems-solitons",
      "title": "The inverse scattering transform (Gardner-Greene-Kruskal-Miura 1967) solves the KdV equation exactly via N-soliton solutions, with Lax pair integrability providing infinitely many conservation laws — unifying Liouville integrable systems theory with soliton physics and optical fiber communications.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A soliton is a solitary wave that maintains its shape and speed after collisions with other solitons — emerging intact from interactions with only a phase shift. This remarkable particle-like behavior was discovered numerically by Zabusky & Kruskal (1965) for the Korteweg-de Vries (KdV) equation: u_",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-quantum-solitons-bethe-ansatz-connection-quantum-inverse-scattering"
      ],
      "communication_gap": "Classical soliton theory and integrable systems are studied in applied mathematics departments (SIAM J Math Anal, Commun Math Phys, Nonlinearity) using PDE analysis, while optical solitons are studied in photonics and electrical engineering (Optics Letters, J Lightwave Technol, Optics Express). The Lax pair formalism is taught in mathematical physics courses but not in optics engineering curricula. The connection between the NLS soliton and KdV integrability (both are IST-solvable) is well known in the mathematical community but not widely recognized by optical engineers deploying fiber soliton communications. Condensed matter physicists using the Bethe ansatz rarely engage with classical IST literature despite the deep mathematical connection.\n",
      "translation_table": [
        {
          "field_a_term": "Fourier transform (linear wave decomposition)",
          "field_b_term": "inverse scattering transform (nonlinear wave decomposition into solitons)"
        },
        {
          "field_a_term": "plane wave e^{ikx} (Fourier eigenfunction)",
          "field_b_term": "soliton u_n(x) = 2κ_n²·sech²(κ_n(x-x_n)) (IST eigenfunction)"
        },
        {
          "field_a_term": "eigenvalue κ_n of Schrödinger operator L = -∂² + u",
          "field_b_term": "soliton amplitude and speed parameter"
        },
        {
          "field_a_term": "Lax pair (L, A) satisfying L_t = [L, A]",
          "field_b_term": "conserved Hamiltonian structure / infinite symmetry algebra"
        },
        {
          "field_a_term": "Liouville integrability (N conserved quantities in involution)",
          "field_b_term": "exact solvability of N-soliton dynamics"
        },
        {
          "field_a_term": "group velocity dispersion β₂ in optical fiber",
          "field_b_term": "dispersive term u_xxx in KdV (broadens pulse)"
        },
        {
          "field_a_term": "Kerr nonlinearity γ|ψ|² (self-phase modulation)",
          "field_b_term": "nonlinear term 6uu_x in KdV (steepens pulse)"
        },
        {
          "field_a_term": "soliton compression (anomalous dispersion + nonlinearity balance)",
          "field_b_term": "shape-preserving nonlinear wave — dispersion exactly cancelled"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.19.1095",
          "note": "Gardner et al. (1967) Phys Rev Lett 19:1095 — method for solving the Korteweg-de Vries equation (inverse scattering transform)"
        },
        {
          "doi": "10.1103/PhysRevLett.15.240",
          "note": "Zabusky & Kruskal (1965) Phys Rev Lett 15:240 — interaction of 'solitons' in a collisionless plasma (soliton naming)"
        },
        {
          "doi": "10.1063/1.1654847",
          "note": "Hasegawa & Tappert (1973) Appl Phys Lett 23:142 — transmission of stationary nonlinear optical pulses in dispersive dielectric fibers"
        },
        {
          "note": "Ablowitz & Segur (1981) Solitons and the Inverse Scattering Transform — SIAM (mathematical reference)"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-physics/b-integrable-systems-solitons.yaml"
    },
    {
      "id": "b-measure-theory-probability",
      "title": "Kolmogorov's measure-theoretic axiomatization (1933) provides the rigorous foundation unifying probability theory and analysis: a probability space (Ω, F, P) with σ-algebra F and countably additive measure P is the mathematical backbone of quantum mechanics, statistical mechanics, and stochastic processes — making probability a branch of measure theory rather than combinatorics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Before Kolmogorov (1933), probability theory rested on informal, domain-specific foundations. Kolmogorov's axioms unified probability under measure theory: a probability space is a triple (Ω, F, P) where Ω is the sample space, F is a σ-algebra of events (closed under countable union and complementat",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-quantum-probability-gleason-measure-uniqueness"
      ],
      "communication_gap": "Probability theory and physics developed through largely separate communities through most of the 20th century. Physicists learned probability through statistical mechanics (Boltzmann, Gibbs) using phase space densities, while mathematicians developed measure-theoretic probability following Kolmogorov. The unification was gradual: Kac (1949) showed the connection between Feynman path integrals and Wiener measure; Gleason (1957) proved the uniqueness of the Born rule using measure theory. Most physics curricula still teach probability informally, without σ-algebras, while pure mathematics probability courses rarely connect to physical applications.\n",
      "translation_table": [
        {
          "field_a_term": "σ-algebra F (events closed under countable operations)",
          "field_b_term": "set of physically observable propositions (Borel sets of phase space)",
          "note": "in classical mechanics, F = Borel(ℝ^{2n}); in QM, F = projection operators on Hilbert space"
        },
        {
          "field_a_term": "probability measure P (countably additive, P(Ω) = 1)",
          "field_b_term": "Born rule measure |ψ(x)|² dμ in quantum mechanics",
          "note": "Gleason's theorem shows Born rule is the unique measure on Hilbert space subspaces"
        },
        {
          "field_a_term": "Lebesgue integral E[X] = ∫X dP",
          "field_b_term": "quantum mechanical expectation ⟨A⟩ = ⟨ψ|A|ψ⟩",
          "note": "both are linear functionals on measurable/Hermitian functions"
        },
        {
          "field_a_term": "strong law of large numbers (a.s. convergence)",
          "field_b_term": "ergodic theorem in statistical mechanics (time average = ensemble average)",
          "note": "Birkhoff ergodic theorem is a measure-theoretic generalization of the LLN"
        },
        {
          "field_a_term": "characteristic function φ_X(t) = E[e^{itX}]",
          "field_b_term": "partition function Z(β) = Tr[e^{-βH}] (Laplace transform of spectrum)",
          "note": "both are moment-generating functions; inverse Fourier/Laplace recovers the distribution"
        },
        {
          "field_a_term": "weak convergence of probability measures",
          "field_b_term": "thermodynamic limit (N → ∞, V → ∞, N/V fixed)",
          "note": "phase transitions in statistical mechanics correspond to non-convergence of characteristic functions"
        }
      ],
      "references": [
        {
          "note": "Kolmogorov (1933) Grundbegriffe der Wahrscheinlichkeitsrechnung; English translation — Foundations of the Theory of Probability (Chelsea, 1956)"
        },
        {
          "note": "Billingsley (1995) Probability and Measure (3rd ed.). Wiley"
        },
        {
          "doi": "10.1007/BF03018479",
          "note": "Borel (1909) Les probabilités dénombrables et leurs applications arithmétiques; Rend Circ Mat Palermo 27:247"
        },
        {
          "note": "Durrett (2019) Probability: Theory and Examples (5th ed.). Cambridge University Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-physics/b-measure-theory-probability.yaml"
    },
    {
      "id": "b-morse-homology-x-conley-index-isolated-invariants",
      "title": "Morse homology counts gradient trajectories between critical points of Morse functions on manifolds — while Conley index theory assigns isolated invariant-set indices to broader dynamical blocks beyond gradient settings — providing paired algebraic-topological tools linking variational Morse theory with generalized isolating neighborhoods used in nonsmooth dynamics and Arnold-style conjecture routes in mathematical physics pedagogy.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Morse homology recovers ordinary homology via chain complexes built from critical points and gradient flow lines — historically motivating Floer-type theories bridging topology with PDE gradient flows in physics contexts (e.g., Yang–Mills inspired frameworks glossed at survey level). Conley index ge",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-morse-homology-x-conley-index-isolated-invariants"
      ],
      "communication_gap": "Graduate topology sequences emphasize Morse homology proofs while applied dynamics conferences cite Conley indices rarely unless attendees trained in algebraic topology seminars — physics departments encounter both primarily through specialized geometric analysis electives.\n",
      "translation_table": [
        {
          "field_a_term": "Morse critical points (index k)",
          "field_b_term": "Generators of cellular Morse chain groups in homology computations",
          "note": "Gradient flows supply boundary operators counting connecting orbits."
        },
        {
          "field_a_term": "Conley index of isolated invariant set S",
          "field_b_term": "Homotopy-type certificate capturing local dynamics modulo isolating block exit sets",
          "note": "Strict generalization when gradient structure absent — carries functorial continuation properties."
        },
        {
          "field_a_term": "Continuation of Morse complexes under parameter change",
          "field_b_term": "Continuation isomorphism for Conley indices across isolating neighborhoods",
          "note": "Parallel narrative about topological persistence across bifurcation corridors under controlled deformations."
        }
      ],
      "references": [
        {
          "doi": "10.1090/surv/038",
          "note": "Mischaikow & Mrozek — Conley Index Theory handbook-style AMS survey lineage"
        },
        {
          "doi": "10.1016/S0167-2789(98)00141-X",
          "note": "Szymczak — computational homology / cubical representations bridging dynamics–topology algorithms context"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-physics/b-morse-homology-x-conley-index-isolated-invariants.yaml"
    },
    {
      "id": "b-nonlinear-optics-soliton-propagation",
      "title": "Optical solitons in nonlinear fiber optics arise when the Kerr nonlinearity (n = n_0 + n_2*I) exactly balances group velocity dispersion, producing pulse profiles described by the nonlinear Schrödinger equation i*∂A/∂z + (β_2/2)*∂^2A/∂t^2 - γ|A|^2*A = 0 whose exact soliton solutions belong to the same inverse-scattering-transform integrable family as the KdV solitons of shallow water waves",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The nonlinear Schrödinger equation (NLSE) governing optical pulse propagation i*∂A/∂z + (β_2/2)*∂^2A/∂t^2 - γ|A|^2*A = 0 is exactly integrable via the inverse scattering transform: its fundamental soliton solution A(z,t) = √P_0 * sech(t/T_0) * exp(iz/2L_D) propagates without distortion because nonli",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Fiber optics engineers optimize transmission systems empirically while mathematical physicists study integrability and the inverse scattering transform; the connection between physical soliton stability and the Lax pair / inverse scattering mathematical structure is rarely communicated across the engineering and mathematics communities.",
      "translation_table": [
        {
          "field_a_term": "optical pulse propagation in nonlinear fiber (optics)",
          "field_b_term": "nonlinear Schrödinger equation with anomalous dispersion (mathematics)",
          "note": "NLSE: i*∂A/∂z = -(β_2/2)*∂^2A/∂t^2 + γ|A|^2*A; anomalous dispersion (β_2 < 0) required for bright solitons"
        },
        {
          "field_a_term": "Kerr nonlinearity n_2 (optics)",
          "field_b_term": "cubic nonlinear term γ in NLSE (mathematics)",
          "note": "γ = n_2*ω_0/(c*A_eff) connects physical material property to the dimensionless nonlinearity coefficient"
        },
        {
          "field_a_term": "soliton self-frequency shift (optics)",
          "field_b_term": "Raman perturbation of NLSE exact soliton solution (mathematics)",
          "note": "Higher-order effects break exact integrability; perturbation theory predicts continuous red-shift of soliton spectrum"
        },
        {
          "field_a_term": "soliton fission and supercontinuum generation (optics)",
          "field_b_term": "breakup of higher-order soliton under perturbation (mathematics)",
          "note": "N-soliton solution decomposes into N fundamental solitons under perturbation; used in photonic crystal fiber supercontinua"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.45.1095",
          "note": "Mollenauer et al. (1980) - experimental observation of optical solitons in single-mode fibers"
        },
        {
          "doi": "10.1109/JQE.1973.1077383",
          "note": "Hasegawa & Tappert (1973) - prediction of soliton propagation in optical fibers"
        },
        {
          "doi": "10.1017/CBO9780511824692",
          "note": "Agrawal (2007) - Nonlinear Fiber Optics: comprehensive NLSE treatment"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-physics/b-nonlinear-optics-soliton-propagation.yaml"
    },
    {
      "id": "b-percolation-network-robustness",
      "title": "Percolation theory — the second-order phase transition from isolated clusters to a giant connected component at threshold p_c = 1/⟨k⟩ on Erdős-Rényi graphs — quantifies network robustness: scale-free networks (Barabási-Albert, P(k)∝k^{-γ}) are robust to random failures but fragile to targeted hub attacks, with p_c→0 as N→∞, transforming network resilience engineering into a percolation problem.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Percolation theory, originally developed for porous media and ferromagnetism, describes the emergence of large-scale connectivity in random structures.\nSite percolation on a network: each node is \"occupied\" (functional) with probability p and removed with probability 1-p. Below a critical threshold ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-targeted-hub-vaccination-achieves-herd-immunity-fewer-doses-scale-free"
      ],
      "communication_gap": "Percolation theory was developed by mathematical physicists and published in Physical Review Letters and Journal of Statistical Physics. Network science applied this to social and technological networks (Barabási, Watts) in Science and Nature. The epidemiology community independently developed the concept of targeted vaccination without full awareness of the percolation framework. Infrastructure engineers design resilience criteria from engineering standards (N-1 reliability) without awareness of percolation theory's predictions about scale-free fragility. The three communities rarely cite each other.\n",
      "translation_table": [
        {
          "field_a_term": "Percolation threshold p_c (statistical physics)",
          "field_b_term": "Critical fraction of nodes that must remain functional for network connectivity",
          "note": "p_c → 0 for scale-free networks with γ ≤ 3 — essentially immune to random failures"
        },
        {
          "field_a_term": "Giant connected component (GCC) emergence (percolation)",
          "field_b_term": "System-wide connectivity enabling global communication or cascading failure",
          "note": "GCC = functional backbone of the network"
        },
        {
          "field_a_term": "Second-order phase transition at p_c (criticality)",
          "field_b_term": "Sudden collapse of network function as failures approach p_c",
          "note": "Network fails abruptly, not gradually, at the percolation threshold"
        },
        {
          "field_a_term": "Degree distribution P(k) ∝ k^{-γ} (scale-free network)",
          "field_b_term": "Hub-dominated topology: few nodes have most connections",
          "note": "Hubs dominate connectivity; removing them catastrophically disrupts the GCC"
        },
        {
          "field_a_term": "Belief propagation / cavity method (statistical physics)",
          "field_b_term": "Near-optimal network dismantling algorithm (engineering)",
          "note": "Message-passing on factor graphs solves NP-hard dismantling problem approximately"
        },
        {
          "field_a_term": "Targeted immunisation of high-degree nodes (epidemiology)",
          "field_b_term": "Equivalent to hub removal in network dismantling",
          "note": "Vaccinating hubs exploits scale-free fragility to efficiently block epidemic spread"
        }
      ],
      "references": [
        {
          "note": "Erdős & Rényi (1960) On the evolution of random graphs, Publ Math Inst Hung Acad Sci 5:17 — discovers the phase transition in random graph connectivity (the \"double jump\")\n"
        },
        {
          "doi": "10.1038/35019019",
          "note": "Albert, Jeong & Barabási (2000) Error and attack tolerance of complex networks, Nature 406:378 — demonstrates scale-free robustness to random failures and fragility to targeted hub attacks\n"
        },
        {
          "doi": "10.1103/PhysRevLett.85.4626",
          "note": "Cohen, Erez, ben-Avraham & Havlin (2000) Resilience of the Internet to random breakdowns, Phys Rev Lett 85:4626 — derives p_c = 0 for scale-free networks analytically\n"
        },
        {
          "doi": "10.1038/nature14603",
          "note": "Morone & Makse (2015) Influence maximisation in complex networks through optimal percolation, Nature 524:65 — belief propagation for optimal network dismantling\n"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-physics/b-percolation-network-robustness.yaml"
    },
    {
      "id": "b-perturbation-theory-quantum-corrections",
      "title": "Perturbation theory in mathematics provides the systematic expansion machinery for quantum corrections in physics — from Rayleigh-Schrödinger eigenvalue series to Feynman-diagram QED calculations verified to 10 significant figures.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The mathematical framework of perturbation theory — expanding solutions of (H₀ + λV)|n⟩ = Eₙ|n⟩ in powers of λ — maps directly onto the physical calculation of quantum corrections. First-order energy correction E_n^(1) = ⟨n⁰|V|n⁰⟩ is the expectation value of the perturbation; second-order E_n^(2) = ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-resurgence-connects-perturbative-nonperturbative-qft"
      ],
      "communication_gap": "Physicists learn perturbation theory operationally (Feynman rules, renormalization) without engaging with the rigorous mathematical analysis of asymptotic series. Mathematicians studying resurgence and transseries rarely interact with QFT practitioners. The Borel summability results of Sokal (1980) and the resurgence program of Écalle are largely unknown in the physics community that uses the results daily.\n",
      "translation_table": [
        {
          "field_a_term": "small parameter λ in H = H₀ + λV",
          "field_b_term": "fine structure constant α ≈ 1/137 in QED",
          "note": "controls the order-by-order expansion; smallness ensures truncation is accurate"
        },
        {
          "field_a_term": "unperturbed eigenstates |n⁰⟩",
          "field_b_term": "free-particle states; tree-level Feynman diagrams",
          "note": "zeroth-order approximation before quantum corrections"
        },
        {
          "field_a_term": "first-order energy correction ⟨n⁰|V|n⁰⟩",
          "field_b_term": "one-loop self-energy correction (Lamb shift, g-2 at α/2π)",
          "note": "leading quantum correction; calculable from single vertex insertions"
        },
        {
          "field_a_term": "degenerate perturbation theory (diagonalize V in subspace)",
          "field_b_term": "renormalization mixing of nearly-degenerate states",
          "note": "required when energy denominators vanish"
        },
        {
          "field_a_term": "Fermi's golden rule W = (2π/ℏ)|⟨f|V|i⟩|²ρ(E_f)",
          "field_b_term": "decay rate / scattering cross-section in QFT",
          "note": "first-order time-dependent perturbation theory; the S-matrix at leading order"
        },
        {
          "field_a_term": "asymptotic series (divergent, Borel summable)",
          "field_b_term": "QED perturbation series; instanton corrections beyond all orders",
          "note": "Dyson argument — the series must diverge; Borel resummation recovers physics"
        }
      ],
      "references": [
        {
          "note": "Sakurai & Napolitano (2017) Modern Quantum Mechanics, 2nd ed.; Cambridge"
        },
        {
          "doi": "10.1103/PhysRev.73.416",
          "note": "Schwinger (1948) On quantum electrodynamics and the magnetic moment of the electron; Phys Rev 73:416"
        },
        {
          "doi": "10.1103/PhysRevLett.123.137601",
          "note": "Aoyama et al. (2019) Theory of the anomalous magnetic moment of the electron; Phys Rev Lett 123:137601"
        },
        {
          "doi": "10.1103/PhysRev.85.631",
          "note": "Dyson (1952) Divergence of perturbation theory in quantum electrodynamics; Phys Rev 85:631"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-physics/b-perturbation-theory-quantum-corrections.yaml"
    },
    {
      "id": "b-renormalization-group-scale-invariance",
      "title": "Renormalization group and scale invariance — the mathematics of how physical laws transform across observation scales unifies critical phenomena, QCD, and universality classes",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The renormalization group (Wilson 1971) describes how physical laws change with observation scale. RG flow: systematically integrate out short-wavelength degrees of freedom → effective theory at longer scales. Fixed points of the RG flow correspond to scale-invariant theories (critical points). The ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-rg-universality-neural-network-criticality"
      ],
      "communication_gap": "Despite Wilson's Nobel Prize (1982), RG is taught differently across physics sub-disciplines: condensed matter physicists learn block spins, particle physicists learn the Callan-Symanzik equation, and string theorists learn AdS/CFT. The underlying mathematical unity is rarely taught as a coherent framework across these communities. Applied mathematics has largely ignored RG despite its potential for multiscale analysis of PDEs.\n",
      "translation_table": [
        {
          "field_a_term": "beta function β(g) = μ dg/dμ",
          "field_b_term": "flow equation in coupling constant space",
          "note": "Positive β: coupling grows at high energies toward a UV Landau pole (e.g. QED); negative β: asymptotic freedom in the UV with coupling growing at long distances (IR slavery, e.g. QCD)"
        },
        {
          "field_a_term": "UV fixed point (g* with β(g*)=0)",
          "field_b_term": "critical point of a statistical mechanical system",
          "note": "Scale-invariant theory; power-law correlations; no characteristic length scale"
        },
        {
          "field_a_term": "relevant operator (grows under RG flow)",
          "field_b_term": "relevant perturbation (drives system away from criticality)",
          "note": "Temperature T-Tc is always a relevant perturbation near a critical point"
        },
        {
          "field_a_term": "universality class (same fixed point, same exponents)",
          "field_b_term": "equivalence class of different physical systems",
          "note": "Water-steam and Ising magnet are in the same universality class (3D Ising)"
        },
        {
          "field_a_term": "conformal field theory (CFT at fixed point)",
          "field_b_term": "exactly solvable 2D critical models (Ising, Potts)",
          "note": "2D CFT is exactly solvable via infinite-dimensional Virasoro symmetry"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0370-1573(74)90023-4",
          "note": "Wilson & Kogut (1974) Phys Rep 12:75 — comprehensive review of exact RG"
        },
        {
          "doi": "10.1103/PhysRevB.4.3174",
          "note": "Wilson (1971) Phys Rev B 4:3174 — original RG paper on critical phenomena"
        },
        {
          "note": "Kadanoff (1966) Physics 2:263 — block spin decimation, precursor to Wilson RG"
        },
        {
          "note": "Cardy (1996) Scaling and Renormalization in Statistical Physics (Cambridge, ISBN 0521499593) — accessible graduate text"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-physics/b-renormalization-group-scale-invariance.yaml"
    },
    {
      "id": "b-ricci-flow-x-geometrization-program",
      "title": "Hamilton's Ricci flow deforms a Riemannian metric by ∂g/∂t = −2 Ric(g), smoothing curvature much like a nonlinear diffusion of geometry; Hamilton's program and Perelman's completion classify three-manifolds by blowing down singularities via surgery — offering a physics intuition that geometric singularization resembles curvature evacuation analogous to diffusion-driven blow-up control in nonlinear PDE.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Ricci flow is a heat-type equation on metrics trading topological complexity for analytic control: short-time existence parallels nonlinear diffusion smoothing irregularities; formation of singularities triggers surgical modifications analogous to cutting out blow-up regions and gluing caps (Perelma",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ricci-flow-x-geometrization-program"
      ],
      "communication_gap": "Pure mathematics outlets publish complete proofs; physics pedagogy rarely assigns Ricci flow except in GR or string theory niches, so students miss the Hamilton–Perelman storyline unless reading survey articles aimed at broad scientists.\n",
      "translation_table": [
        {
          "field_a_term": "Ricci curvature Ric(g)",
          "field_b_term": "Diffusion-like flux redistributing curvature density along the manifold under ∂g/∂t = −2 Ric",
          "note": "Curvature decreases where diffusion analogy holds locally before singularities dominate."
        },
        {
          "field_a_term": "Surgery at singular times (Perelman)",
          "field_b_term": "Removing blow-up neighborhoods and replacing with standard caps (hand-cut surgery metaphor)",
          "note": "Matches engineering intuition of removing defect cores while preserving global topology invariants."
        },
        {
          "field_a_term": "Thurston geometrization conjecture (now theorem)",
          "field_b_term": "Physical expectation that locally homogeneous geometries emerge as effective descriptions after RG-like iteration",
          "note": "Informal RG analogy only — not a literal Wilsonian partition function."
        }
      ],
      "references": [
        {
          "doi": "10.4310/jdg/1214436922",
          "note": "Hamilton (1982) J. Diff. Geom. — three-manifolds with positive Ricci curvature (foundational Ricci flow)"
        },
        {
          "arxiv": "math/0211159",
          "note": "Perelman (2002) arXiv math.DG/0211159 — The entropy formula for the Ricci flow and its geometric applications; surgery is constructed in the follow-up math.DG/0303109 (check local citation conventions)"
        },
        {
          "doi": "10.4310/jdg/1214345587",
          "note": "Perelman (2003) JDG — entropy functional for Ricci flow (published version of later preprints)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-physics/b-ricci-flow-x-geometrization-program.yaml"
    },
    {
      "id": "b-riemann-hypothesis-quantum-chaos-montgomery-odlyzko",
      "title": "The zeros of the Riemann zeta function are statistically distributed like eigenvalues of random Hermitian matrices (GUE), the same ensemble that describes energy-level spacings in quantum-chaotic systems — the Montgomery-Odlyzko law.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Montgomery (1973) conjectured — proving a partial result for restricted test functions under RH — that the pair-correlation of Riemann zeta zeros matches the GUE (Gaussian Unitary Ensemble) pair-correlation function — the same distribution Wigner and Dyson found for energy-level spacings in quantum-chaotic Hamiltonians. Odlyzko's numerics confirmed this to extraordinary ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-riemann-zeros-quantum-chaotic-spectrum"
      ],
      "communication_gap": "Number theorists studying the Riemann hypothesis and physicists studying quantum chaos have interacted productively since the 1970s, but the mathematical proof of the connection is still lacking; many number theorists are unfamiliar with the random matrix machinery, and many physicists are unaware of recent analytic number theory results on moments of the zeta function.\n",
      "translation_table": [
        {
          "field_a_term": "nontrivial zeros of ζ(s) on the critical line (mathematics)",
          "field_b_term": "energy eigenvalues of a quantum-chaotic Hamiltonian (physics)",
          "note": "Both exhibit GUE level-spacing statistics — level repulsion and universal correlations"
        },
        {
          "field_a_term": "Montgomery pair-correlation function (mathematics)",
          "field_b_term": "Dyson-Mehta two-point correlation in GUE (physics)",
          "note": "The functions agree to high numerical precision for large zeros"
        },
        {
          "field_a_term": "Riemann-Siegel formula / zero-counting function N(T) (mathematics)",
          "field_b_term": "Weyl law for eigenvalue density in quantum chaos (physics)",
          "note": "Both count eigenvalues/zeros up to a given height with the same asymptotic form"
        },
        {
          "field_a_term": "L-function universality class (mathematics)",
          "field_b_term": "symmetry class of random matrix ensemble (physics)",
          "note": "Different L-functions correspond to different RMT ensembles (GUE, GOE, GSE)"
        }
      ],
      "references": [
        {
          "doi": "10.1090/pspum/024/9944",
          "note": "Montgomery (1973) — pair correlation of zeta zeros and GUE conjecture"
        },
        {
          "doi": "10.1007/BF01579347",
          "note": "Odlyzko (1987) — numerical evidence for GUE statistics in Riemann zeros"
        },
        {
          "doi": "10.1080/10586458.2003.10504509",
          "note": "Katz & Sarnak (1999) — random matrices, Frobenius eigenvalues, and monodromy"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-physics/b-riemann-hypothesis-quantum-chaos-montgomery-odlyzko.yaml"
    },
    {
      "id": "b-stochastic-de-quantum-field-theory",
      "title": "Parisi-Wu stochastic quantization maps quantum field theory path integrals onto the equilibrium distribution of a Langevin stochastic process in a fictitious fifth (stochastic-time) dimension, with the Onsager-Machlup action as the classical-path analog of the Feynman amplitude, bridging stochastic differential equations and QFT.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Parisi-Wu (1981) stochastic quantization scheme shows that the quantum expectation values of any field theory ⟨O[φ]⟩ can be obtained as equilibrium averages of a stochastic process: ∂φ/∂τ = −δS/δφ + η(x,τ), where τ is a fictitious fifth dimension (\"stochastic time\"), S[φ] is the Euclidean action",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-onsager-machlup-loop-expansion-qft-thermal-field-theory"
      ],
      "communication_gap": "Stochastic differential equations are studied in mathematics departments (probability theory, Itô calculus, SPDE theory — journals: Stochastic Processes and their Applications, Ann Probab) essentially independently of quantum field theory (theoretical physics — journals: Phys Rev D, Nucl Phys B). The Parisi-Wu paper appeared in Scientia Sinica (Chinese science journal) in 1981 and took years to propagate. The mathematical community developing regularity structures for SPDEs (Hairer, 2014 Fields Medal) rarely engages with QFT applications. Physicists using the Fokker-Planck formalism rarely cite Itô calculus literature.\n",
      "translation_table": [
        {
          "field_a_term": "Langevin equation ẋ = −∇U/γ + ξ(t) (overdamped Brownian particle)",
          "field_b_term": "stochastic quantization flow ∂φ/∂τ = −δS/δφ + η (field in fictitious time)"
        },
        {
          "field_a_term": "Fokker-Planck stationary distribution P_eq ∝ exp(−U/kT)",
          "field_b_term": "quantum path integral measure exp(−S[φ]/ℏ)"
        },
        {
          "field_a_term": "thermal fluctuation strength kT",
          "field_b_term": "quantum fluctuation strength ℏ (Planck constant)"
        },
        {
          "field_a_term": "Onsager-Machlup action S_OM (classical stochastic path weight)",
          "field_b_term": "Feynman Euclidean action S_E (quantum amplitude weight)"
        },
        {
          "field_a_term": "Itô vs. Stratonovich prescription (stochastic calculus)",
          "field_b_term": "normal ordering vs. Weyl ordering (quantum operator ambiguity)"
        },
        {
          "field_a_term": "equilibrium time τ→∞ of stochastic process",
          "field_b_term": "renormalisation group fixed point of quantum field theory"
        }
      ],
      "references": [
        {
          "note": "Parisi & Wu (1981) Sci Sin 24:483 — stochastic quantization original paper"
        },
        {
          "doi": "10.1103/PhysRev.91.1505",
          "note": "Onsager & Machlup (1953) Phys Rev 91:1505 — fluctuations and irreversible processes"
        },
        {
          "note": "Zwanzig (2001) Nonequilibrium Statistical Mechanics — Oxford University Press"
        },
        {
          "note": "Gardiner (2009) Stochastic Methods: A Handbook for the Natural and Social Sciences — Springer"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-physics/b-stochastic-de-quantum-field-theory.yaml"
    },
    {
      "id": "b-stochastic-quantization-qft",
      "title": "THE 250th BRIDGE: Parisi-Wu stochastic quantization (1981) maps quantum field theory onto stochastic differential equations by deriving quantum amplitudes as the equilibrium distribution of a Langevin process in fictitious time τ, connecting Itô stochastic calculus (the mathematics of Brownian motion) to the path integral formulation of quantum mechanics — the deepest known bridge between stochastic mathematics and quantum physics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Parisi & Wu (1981) proposed that quantum field theory amplitudes can be computed as the equilibrium distribution of a fictitious Markov process in a fifth (Langevin) time τ. The stochastic quantization equation is:\n  ∂φ(x,τ)/∂τ = -δS[φ]/δφ(x,τ) + η(x,τ)\nwhere S[φ] is the Euclidean classical action, ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-langlands-physics-electric-magnetic-duality"
      ],
      "communication_gap": "Parisi & Wu (1981) published in Chinese (Scientia Sinica) and the result was initially unknown in the West; Damgaard & Hüffel (1987) wrote the comprehensive review that brought stochastic quantization to the Western physics community. The connection to modern machine learning (diffusion models) was not recognized until Song & Ermon (2019); the score function = -δS/δφ equivalence to stochastic quantization is mentioned occasionally in machine learning theory papers but has not been systematically developed. The SDE mathematics (Itô calculus, Fokker-Planck operators) is taught in mathematics departments but rarely connected to quantum field theory in physics curricula.\n",
      "translation_table": [
        {
          "field_a_term": "Euclidean action S[φ] in quantum field theory",
          "field_b_term": "energy function / potential in stochastic dynamics (Lyapunov function)",
          "note": "S[φ] plays the role of -log P[φ] in the equilibrium distribution — identical to statistical mechanics"
        },
        {
          "field_a_term": "Feynman path integral weight exp(-S_E[φ]) (Euclidean)",
          "field_b_term": "Boltzmann distribution exp(-βE) at inverse temperature β=1",
          "note": "Wick rotation to imaginary time τ = it maps quantum oscillations to thermal fluctuations"
        },
        {
          "field_a_term": "Langevin equation stochastic force η(x,τ) (white noise)",
          "field_b_term": "thermal noise in Brownian motion / fluctuation-dissipation theorem",
          "note": "the noise amplitude 2 in ⟨ηη⟩=2δ is the fluctuation-dissipation coefficient (= 2kT in classical case)"
        },
        {
          "field_a_term": "Fokker-Planck equation (evolution of probability density in field space)",
          "field_b_term": "diffusion equation for probability density of particle positions",
          "note": "both are parabolic PDEs; stochastic quantization Fokker-Planck has field configuration space as state space"
        },
        {
          "field_a_term": "equilibrium distribution P∝exp(-S) of Langevin process",
          "field_b_term": "quantum vacuum = ground state expectation values in Euclidean QFT",
          "note": "the fundamental equivalence — quantum vacuum physics = stochastic equilibrium"
        },
        {
          "field_a_term": "Langevin \"time\" τ (fictitious 5th dimension)",
          "field_b_term": "Monte Carlo steps in lattice QCD simulation",
          "note": "stochastic quantization provides theoretical basis for MCMC sampling of lattice gauge fields"
        }
      ],
      "references": [
        {
          "note": "Parisi & Wu (1981) — Perturbation theory without gauge fixing; Sci Sin 24:483"
        },
        {
          "doi": "10.1016/0370-1573(87)90144-X",
          "note": "Damgaard & Hüffel (1987) — Stochastic quantization; Phys Rep 152:227"
        },
        {
          "doi": "10.1103/PhysRev.150.1079",
          "note": "Nelson (1966) — Derivation of the Schrödinger equation from Newtonian mechanics; Phys Rev 150:1079"
        },
        {
          "note": "Zinn-Justin (2002) — Quantum Field Theory and Critical Phenomena; Oxford University Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-physics/b-stochastic-quantization-qft.yaml"
    },
    {
      "id": "b-symplectic-geometry-hamiltonian",
      "title": "Hamiltonian mechanics lives on a symplectic manifold where the 2-form omega generates evolution, Liouville's theorem is phase-space volume conservation, Arnold-Liouville integrability creates KAM tori, and Gromov's non-squeezing theorem sets a topological obstruction to phase-space compression — making symplectic geometry the natural mathematical language of classical and quantum mechanics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Symplectic geometry provides the rigorous mathematical foundation for Hamiltonian mechanics, revealing deep geometric structures that constrain the dynamics of physical systems from atomic scales to planetary orbits.\nSymplectic manifold: a symplectic manifold (M, ω) is a smooth even-dimensional mani",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-gromov-nonsqueezing-quantum-uncertainty-derivation"
      ],
      "communication_gap": "Symplectic geometry was developed as pure mathematics (Elie Cartan, 1920s; Darboux 1880s) without explicit awareness of its relevance to mechanics. Arnold's \"Mathematical Methods of Classical Mechanics\" (1974, English 1978) made the connection explicit and accessible, but required graduate mathematics expertise that most physicists lacked. Most physics graduate programmes teach Hamiltonian mechanics in Lagrangian/Newtonian notation without mentioning differential forms. Gromov's non-squeezing theorem (1985) appeared in Inventiones Mathematicae (pure mathematics) and its physical interpretation took years to become standard. The uncertainty-principle connection is sometimes noted in textbooks but rarely developed rigorously. Classical mechanics and differential geometry remain institutionally separated in most universities.\n",
      "translation_table": [
        {
          "field_a_term": "symplectic 2-form ω = Σ dqᵢ ∧ dpᵢ",
          "field_b_term": "fundamental phase-space measure in statistical mechanics (Liouville measure)",
          "note": "invariance of ω under Hamiltonian flow is Liouville's theorem"
        },
        {
          "field_a_term": "Hamiltonian vector field X_H (ι_{X_H}ω = dH)",
          "field_b_term": "Hamilton's equations of motion (dq/dt = ∂H/∂p, dp/dt = -∂H/∂q)",
          "note": "geometric definition; coordinate-free; specialises to standard form in Darboux coordinates"
        },
        {
          "field_a_term": "symplectomorphism (diffeomorphism preserving ω)",
          "field_b_term": "canonical transformation (preserving Poisson brackets)",
          "note": "same concept; different terminology across mathematics and physics"
        },
        {
          "field_a_term": "Arnold-Liouville action-angle coordinates (I, θ)",
          "field_b_term": "adiabatic invariant I = ∮ p dq / 2π in classical mechanics",
          "note": "action variables are adiabatic invariants; angle variables are cyclic"
        },
        {
          "field_a_term": "KAM tori (surviving invariant tori under perturbation)",
          "field_b_term": "stability of near-integrable Hamiltonian systems (solar system stability)",
          "note": "KAM theory proves that most tori survive if the perturbation is small enough"
        },
        {
          "field_a_term": "Gromov non-squeezing (ball in cylinder obstruction)",
          "field_b_term": "phase space area inequality (stronger than Liouville volume conservation)",
          "note": "non-squeezing is the classical analogue of the Heisenberg uncertainty principle"
        },
        {
          "field_a_term": "Poisson bracket {F,G} = Σ(∂F/∂qᵢ ∂G/∂pᵢ - ∂F/∂pᵢ ∂G/∂qᵢ)",
          "field_b_term": "quantum commutator [F̂,Ĝ]/iℏ (deformation quantisation)",
          "note": "Dirac's quantisation rule; symplectic geometry underlies quantum mechanics"
        }
      ],
      "references": [
        {
          "note": "Arnold (1989) Mathematical Methods of Classical Mechanics, 2nd ed. Springer, New York"
        },
        {
          "doi": "10.1007/BF01388806",
          "note": "Gromov (1985) Invent Math 82:307 — non-squeezing theorem"
        },
        {
          "note": "McDuff & Salamon (2017) Introduction to Symplectic Topology, 3rd ed. Oxford University Press"
        },
        {
          "note": "Hofer & Zehnder (1994) Symplectic Invariants and Hamiltonian Dynamics. Birkhauser, Basel"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-physics/b-symplectic-geometry-hamiltonian.yaml"
    },
    {
      "id": "b-symplectic-geometry-mechanics",
      "title": "Hamilton's equations are flows on a symplectic manifold (M, ω), Noether's theorem is the statement that Hamiltonian symmetries preserve the symplectic form, and quantum mechanics is the deformation quantization of the classical symplectic structure — making symplectic geometry the exact mathematical language of mechanics at every scale from classical to quantum.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Classical mechanics is entirely captured by symplectic geometry: the phase space (q, p) of a mechanical system is a symplectic manifold (M, ω) where ω = dq ∧ dp is the symplectic 2-form. Hamilton's equations\n\n  ṗ = -∂H/∂q,  q̇ = ∂H/∂q\n\nare precisely the flow of the Hamiltonian vector field X_H defin",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-symplectic-quantization-new-prediction"
      ],
      "communication_gap": "Arnold (1978) demonstrated that classical mechanics can be entirely reformulated in symplectic language, but physics textbooks still teach Hamilton's equations as differential equations without the geometric context. The symplectic geometry literature (Guillemin, Sternberg, McDuff, Salamon) is written for mathematicians. The deformation quantization connection (Kontsevich 1997 formality theorem, Fields Medal 1998) is essentially unknown to most physicists outside mathematical physics. The result is that physicists reinvent symplectic results in coordinate language while mathematicians prove theorems unconnected to physical intuition.\n",
      "translation_table": [
        {
          "field_a_term": "Symplectic manifold (M, ω)",
          "field_b_term": "Phase space of a mechanical system",
          "note": "Coordinates (q^i, p_i) are Darboux coordinates; ω = Σ dq^i ∧ dp_i globally"
        },
        {
          "field_a_term": "Hamiltonian vector field X_H",
          "field_b_term": "Hamilton's equations of motion",
          "note": "ι_{X_H} ω = dH; the flow of X_H is the time evolution of the mechanical system"
        },
        {
          "field_a_term": "Lagrangian submanifold",
          "field_b_term": "Classical trajectory (solution to equations of motion)",
          "note": "The graph of dS (Hamilton's principal function) is a Lagrangian submanifold"
        },
        {
          "field_a_term": "Moment map μ: M → g*",
          "field_b_term": "Conserved quantities via Noether's theorem",
          "note": "Components of μ are the conserved charges associated to the symmetry group G"
        },
        {
          "field_a_term": "Symplectic reduction μ^{-1}(0)/G",
          "field_b_term": "Constraint elimination in Dirac's theory",
          "note": "First-class constraints generate gauge symmetries; reduction removes them"
        },
        {
          "field_a_term": "Deformation quantisation (ℏ-parameter)",
          "field_b_term": "Transition from classical to quantum mechanics",
          "note": "Poisson bracket {f,g} deforms to commutator [f̂,ĝ]/(iℏ) as ℏ → 0"
        },
        {
          "field_a_term": "Symplectic volume ω^n / n!",
          "field_b_term": "Liouville measure in statistical mechanics",
          "note": "Preserved by Hamiltonian flow; foundation of the equiprobability of phase space microstates"
        }
      ],
      "references": [
        {
          "note": "Arnold (1978) Mathematical Methods of Classical Mechanics (Springer) — definitive treatment of symplectic mechanics",
          "url": "https://link.springer.com/book/10.1007/978-1-4757-2063-1"
        },
        {
          "doi": "10.1007/BF01209016",
          "note": "Weyl (1931) — quantization and group theory; early deformation quantization"
        },
        {
          "note": "Dirac (1964) Lectures on Quantum Mechanics (Yeshiva UP) — constraint quantization and symplectic reduction",
          "url": "https://store.doverpublications.com/0486417131.html"
        },
        {
          "doi": "10.1023/A:1021693402513",
          "note": "Kontsevich (2003) Lett Math Phys 66:157 — deformation quantization of Poisson manifolds; Fields Medal result"
        },
        {
          "note": "McDuff & Salamon (2017) Introduction to Symplectic Topology, 3rd ed. (Oxford) — modern reference",
          "url": "https://oxford.universitypressscholarship.com/view/10.1093/oso/9780198794899.001.0001/oso-9780198794899"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/mathematics-physics/b-symplectic-geometry-mechanics.yaml"
    },
    {
      "id": "b-topological-defects-homotopy-x-condensed-matter-order",
      "title": "Homotopy classification of order-parameter manifolds predicts defect types and stability classes in condensed matter symmetry-breaking transitions.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The fundamental group and higher homotopy groups of an order-parameter manifold determine allowable line, point, and texture defects after symmetry breaking. This creates a direct bridge between abstract topology and measurable defect statistics in superfluids, liquid crystals, and superconducting-l",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-defect-topology-predicts-coarsening-scaling-exponents"
      ],
      "communication_gap": "Topology provides clean invariants, but experiments often report morphology-only descriptors without linking to homotopy classes or conservation laws.\n",
      "translation_table": [
        {
          "field_a_term": "homotopy groups pi_n(M)",
          "field_b_term": "defect taxonomy (vortices, monopoles, domain walls)",
          "note": "Nontrivial classes identify topologically protected defect families."
        },
        {
          "field_a_term": "geodesic rule and random phase domains",
          "field_b_term": "defect density scaling after quenches",
          "note": "Correlation length at freeze-out sets expected initial defect count."
        },
        {
          "field_a_term": "topological charge conservation",
          "field_b_term": "annihilation/coarsening constraints in late-time dynamics",
          "note": "Pair annihilation rates are constrained by charge compatibility."
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.76.2077",
          "note": "Zurek (1996), cosmological experiments in superfluid helium and defect formation scaling."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/mathematics-physics/b-topological-defects-homotopy-x-condensed-matter-order.yaml"
    },
    {
      "id": "b-topology-condensed-matter",
      "title": "Topological quantum matter is classified by homotopy groups and Chern numbers — the integer Hall conductance σ_xy = (e²/h)C₁ is a topological invariant of the occupied band bundle, and the tenfold Altland-Zirnbauer symmetry classification maps condensed matter physics onto K-theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The quantum Hall effect (von Klitzing 1980) revealed that electrical conductance can be quantised to integer multiples of e²/h with precision better than 10⁻⁹, robust to disorder and sample imperfections. The mathematical explanation (TKNN: Thouless-Kohmoto-Nightingale-den Nijs 1982) identifies the ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-topology-chern-number-predicts-edge-state-count"
      ],
      "communication_gap": "Algebraic topology (fibre bundles, Chern classes, K-theory) is graduate mathematics taught in pure mathematics departments with no connection to condensed matter physics. Experimental condensed matter physicists measure quantum Hall conductance without necessarily understanding the Chern number proof. Topological field theory (Chern-Simons) bridges the two but requires both high-energy physics and differential geometry, creating a high entry barrier that limits cross-fertilisation.\n",
      "translation_table": [
        {
          "field_a_term": "first Chern number C₁ (principal U(1) bundle over T²)",
          "field_b_term": "quantised Hall conductance σ_xy = νe²/h (ν ∈ ℤ)",
          "note": "TKNN proved this exact equality; C₁ counts the number of edge states by bulk-boundary correspondence"
        },
        {
          "field_a_term": "Berry phase γ = ∮ A_k·dk (holonomy of bundle connection)",
          "field_b_term": "Aharonov-Bohm phase of electron in magnetic flux (physical Berry phase)",
          "note": "Berry curvature integrated over BZ gives Chern number; Berry phase around a loop is physically measurable"
        },
        {
          "field_a_term": "K-theory classification of vector bundles",
          "field_b_term": "tenfold-way periodic table of topological insulators and superconductors",
          "note": "Kitaev (2009) derived the full classification using K-theory of Clifford algebras"
        },
        {
          "field_a_term": "homotopy group π_n(classifying space)",
          "field_b_term": "topological invariant in n-dimensional system (ℤ or ℤ₂)",
          "note": "Different symmetry classes have different classifying spaces; πd determines which invariants exist"
        },
        {
          "field_a_term": "topological order (long-range entanglement, anyons)",
          "field_b_term": "fractional quantum Hall effect — quasiparticles with charge e/3, anyonic statistics",
          "note": "FQH states are not captured by band topology; they require many-body topological field theory (Chern-Simons)"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.45.494",
          "note": "Klitzing et al. (1980) Phys Rev Lett 45:494 — discovery of the integer quantum Hall effect; Nobel Prize 1985"
        },
        {
          "doi": "10.1103/PhysRevLett.49.405",
          "note": "Thouless et al. (1982) Phys Rev Lett 49:405 — TKNN invariant; quantised Hall conductance as Chern number"
        },
        {
          "doi": "10.1103/PhysRevB.55.1142",
          "note": "Altland & Zirnbauer (1997) Phys Rev B 55:1142 — non-standard symmetry classes; tenfold way classification"
        },
        {
          "note": "Wen, X.G. (2004) Quantum Field Theory of Many-Body Systems. Oxford University Press — topological order and fractional quantum Hall"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-physics/b-topology-condensed-matter.yaml"
    },
    {
      "id": "b-hopf-algebras-feynman-renormalization",
      "title": "The renormalization of Feynman diagrams in quantum field theory has an exact algebraic structure given by a Hopf algebra of rooted trees (Connes-Kreimer), making perturbative renormalization a theorem in non-commutative geometry rather than an ad hoc procedure.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Connes and Kreimer showed that the set of Feynman diagrams under the operation of subdivergence removal forms a commutative Hopf algebra H_FG (the Feynman graph Hopf algebra), with coproduct Delta encoding overlapping subdivergence structure; the BPHZ renormalization procedure is exactly the antipod",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-motives-feynman-amplitudes-arithmetic"
      ],
      "communication_gap": "Physicists learn renormalization through calculation while algebraists study Hopf algebras abstractly; the Connes-Kreimer connection (established 1998-2000) is known in mathematical physics but rarely taught in standard QFT courses, where renormalization is still presented through the BPHZ forest formula without its Hopf algebraic interpretation.\n",
      "translation_table": [
        {
          "field_a_term": "Feynman diagram with subdivergences (QFT)",
          "field_b_term": "element of Hopf algebra H_FG (mathematics)",
          "note": "Each 1PI Feynman graph is a generator; the coproduct encodes its subdivergence structure"
        },
        {
          "field_a_term": "BPHZ counterterm subtraction (QFT renormalization)",
          "field_b_term": "antipode S in the Hopf algebra (mathematics)",
          "note": "The renormalized diagram is the convolution of the antipode with the Feynman rules character"
        },
        {
          "field_a_term": "Dyson-Schwinger equations (QFT)",
          "field_b_term": "fixed-point equations in H_FG (combinatorial Hopf algebra)",
          "note": "DS equations become recursive equations for elements of H_FG, revealing their combinatorial structure"
        },
        {
          "field_a_term": "dimensional regularization (QFT)",
          "field_b_term": "Rota-Baxter algebra structure of Laurent series in epsilon",
          "note": "The minimal subtraction scheme corresponds to the Birkhoff decomposition with respect to the epsilon pole"
        }
      ],
      "references": [
        {
          "doi": "10.1007/s002200050499",
          "note": "Connes & Kreimer (1998) - Hopf algebras, renormalization and noncommutative geometry"
        },
        {
          "doi": "10.1007/s002200050730",
          "note": "Kreimer (1999) - on the Hopf algebra structure of perturbative quantum field theories"
        },
        {
          "doi": "10.1007/s00220-001-0528-7",
          "note": "Connes & Kreimer (2001) - renormalization in quantum field theory and Riemann-Hilbert problem"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-quantum-physics/b-hopf-algebras-feynman-renormalization.yaml"
    },
    {
      "id": "b-spectral-theory-quantum-mechanics",
      "title": "Quantum mechanics is functional analysis applied to physics — observables are self-adjoint operators and measurement outcomes are their eigenvalues",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The mathematical framework of quantum mechanics is exactly the spectral theory of self-adjoint operators on a Hilbert space. Observables are self-adjoint operators; measurement outcomes are eigenvalues (guaranteed real by self-adjointness); measurement probabilities are given by the spectral measure",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-quantum-spectral-gap-computational-complexity"
      ],
      "communication_gap": "Physicists learn quantum mechanics through wave functions and the Schrödinger equation before encountering operator theory; mathematicians learn functional analysis abstractly without physical motivation. The complete mathematical foundations are taught in mathematical physics courses but rarely to either physics or mathematics students alone.\n",
      "translation_table": [
        {
          "field_a_term": "self-adjoint operator A on Hilbert space",
          "field_b_term": "physical observable (position, momentum, energy, spin)",
          "note": "Self-adjointness guarantees real eigenvalues = real measurement outcomes"
        },
        {
          "field_a_term": "eigenvalue of A",
          "field_b_term": "possible measurement outcome",
          "note": "Spectral theorem guarantees complete set of eigenvalues"
        },
        {
          "field_a_term": "spectral measure (projection-valued measure)",
          "field_b_term": "Born rule probability distribution",
          "note": "P(A in E) = <psi|P_E|psi> where P_E is the spectral projection onto E"
        },
        {
          "field_a_term": "unitary group generated by self-adjoint H",
          "field_b_term": "quantum time evolution operator exp(-iHt/hbar)",
          "note": "Stone's theorem: every strongly continuous unitary group has a self-adjoint generator"
        },
        {
          "field_a_term": "tensor product of Hilbert spaces",
          "field_b_term": "composite quantum system / entanglement",
          "note": "Entanglement = non-separable states in tensor product space"
        }
      ],
      "references": [
        {
          "note": "von Neumann, J. (1932). Mathematische Grundlagen der Quantenmechanik. Springer."
        },
        {
          "doi": "10.2307/1968551",
          "note": "Stone, M.H. (1932). On one-parameter unitary groups in Hilbert space. Ann Math 33:643."
        },
        {
          "note": "Reed, M. & Simon, B. (1972). Methods of Modern Mathematical Physics. Academic Press."
        },
        {
          "note": "Dirac, P.A.M. (1930). The Principles of Quantum Mechanics. Oxford University Press."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-quantum-physics/b-spectral-theory-quantum-mechanics.yaml"
    },
    {
      "id": "b-cooperative-game-theory-coalitions",
      "title": "Cooperative game theory's core, Shapley value, and nucleolus provide axiomatic frameworks for fair allocation in coalition formation, with direct applications to cost-sharing institutions, climate agreements, and multi-party negotiations.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A cooperative game (N, v) consists of a player set N and characteristic function v(S) giving the value any coalition S ⊆ N can achieve independently. The core is the set of allocations x where no coalition can do better (Σᵢ∈S xᵢ ≥ v(S) for all S) — it may be empty (empty core ↔ no stable allocation)",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-shapley-value-predicts-international-climate-burden-sharing"
      ],
      "communication_gap": "Cooperative game theory is taught in economics and mathematics departments but rarely in political science or international relations where treaty negotiation actually occurs. Political scientists use qualitative bargaining theory; economists use cooperative game theory; they rarely read the same journals. Legal scholars who design bankruptcy law do not typically know the Aumann-Maschler bankruptcy game or its Shapley value solution. The mathematics community that proved these theorems is largely disconnected from the social science and policy communities that could apply them.\n",
      "translation_table": [
        {
          "field_a_term": "characteristic function v(S)",
          "field_b_term": "coalition's best achievable outcome in a negotiation",
          "note": "In climate agreements, v(S) = total emission reduction achievable by coalition S"
        },
        {
          "field_a_term": "Shapley value φᵢ(v)",
          "field_b_term": "fair share of gains from cooperation / cost allocation",
          "note": "Used in EU budget allocation debates, network cost sharing, bandwidth pricing"
        },
        {
          "field_a_term": "the core (set of stable allocations)",
          "field_b_term": "politically feasible allocation space (no country worse off than going alone)",
          "note": "Empty core → no stable multilateral agreement exists without side payments"
        },
        {
          "field_a_term": "nucleolus (minimises maximum coalition complaint)",
          "field_b_term": "leximin allocation — most egalitarian stable agreement",
          "note": "Nucleolus selects the allocation that minimises the worst-off coalition's complaint"
        },
        {
          "field_a_term": "Nash bargaining solution",
          "field_b_term": "bilateral trade deals, labor-management negotiations",
          "note": "Nash product maximisation has a natural interpretation as proportional to gains from trade"
        }
      ],
      "references": [
        {
          "note": "Shapley (1953) — A value for n-person games, Contributions to the Theory of Games II, Princeton"
        },
        {
          "doi": "10.2307/1907951",
          "note": "Nash (1950) — The bargaining problem, Econometrica 18:155"
        },
        {
          "doi": "10.1137/0117107",
          "note": "Schmeidler (1969) — The nucleolus of a characteristic function game, J Appl Math 17:1163"
        },
        {
          "doi": "10.1016/0022-0531(79)90064-1",
          "note": "Roth & Verrecchia (1979) — The Shapley value as applied to cost allocation, J Math Econ 6:295"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-social-science/b-cooperative-game-theory-coalitions.yaml"
    },
    {
      "id": "b-fair-division-combinatorics",
      "title": "Envy-free cake cutting for n agents connects Sperner's lemma in combinatorics to fair division in social science: the existence of envy-free allocations for heterogeneous divisible goods follows from topological fixed-point arguments (Sperner-Brouwer), while spectrum allocation, inheritance law, and parliamentary seat apportionment use combinatorial fair division algorithms derived from the same mathematical foundations.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Steinhaus-Banach I-cut-you-choose procedure (1948) gives an envy-free allocation for n=2 agents. For n=3: the Selfridge-Conway procedure achieves envy-freeness in a finite number of cuts. For n>=3 generally: Stromquist (1980) proved no finite envy-free protocol exists for divisible goods with n>",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-collective-action-ostrom-design-principles-v2"
      ],
      "communication_gap": "Fair division mathematics developed in economics and mathematics departments with minimal cross-citation. The combinatorics community (Sperner, Brouwer) developed fixed-point theorems without awareness of fair division applications. Procaccia's work on computational fair division began bridging computer science and economics starting ~2008 but fair division textbooks rarely present the Sperner's lemma connection.\n",
      "translation_table": [
        {
          "field_a_term": "envy-free allocation (no agent prefers another's share)",
          "field_b_term": "Nash equilibrium in fair division game",
          "note": "envy-freeness implies stability — no agent has incentive to demand a trade"
        },
        {
          "field_a_term": "Sperner's lemma (labeled triangulation has fully-labeled simplex)",
          "field_b_term": "existence of envy-free allocation (non-constructive)",
          "note": "topological combinatorics provides existence proof without an explicit algorithm"
        },
        {
          "field_a_term": "I-cut-you-choose (n=2 protocol)",
          "field_b_term": "mechanism design for bilateral fair division",
          "note": "optimal incentive-compatible mechanism for heterogeneous binary division"
        },
        {
          "field_a_term": "EF1 (envy-free up to one item, for discrete goods)",
          "field_b_term": "approximately fair division of indivisible resources",
          "note": "EF1 is the best fairness guarantee achievable for indivisible goods in polynomial time"
        }
      ],
      "references": [
        {
          "doi": "10.2307/1907687",
          "note": "Steinhaus (1948) — The problem of fair division; Econometrica 16:101"
        },
        {
          "doi": "10.2307/2975028",
          "note": "Brams & Taylor (1995) — An envy-free cake division protocol; Am Math Mon 102:9"
        },
        {
          "doi": "10.2307/2589304",
          "note": "Su (1999) — Rental harmony; Sperner's lemma in fair division; Am Math Mon 106:930"
        },
        {
          "doi": "10.1145/2509002",
          "note": "Procaccia (2013) — Cake cutting; not just child's play; Commun ACM 56:36"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-social-science/b-fair-division-combinatorics.yaml"
    },
    {
      "id": "b-information-cascades-herding",
      "title": "Information Cascades and Herding — Bikhchandani's rational cascade model explains bank runs, market crashes, fashion, and social media virality as informationally inefficient equilibria",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "An information cascade (Bikhchandani, Hirshleifer & Welch 1992) arises when individuals, making decisions sequentially, rationally choose to ignore their own private information and copy the observed actions of predecessors. The canonical model: N agents choose between actions A and B; each receives",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "The information cascade literature sits primarily in economics and game theory journals (Journal of Political Economy, Quarterly Journal of Economics) and is largely unknown to sociologists studying collective behaviour, political scientists studying opinion formation, and computer scientists studying social network dynamics — despite modelling the same phenomena. The distinction between observational learning (cascade-prone) and communicative social learning is crucial for platform design but rarely makes it into technology policy discussions.\n",
      "translation_table": [
        {
          "field_a_term": "Bayesian updating on public action history",
          "field_b_term": "rational inference from observed crowd behaviour",
          "note": "The cascade agent is NOT irrational — they are Bayesian; irrationality is not required to explain herding"
        },
        {
          "field_a_term": "private signal suppression after cascade onset",
          "field_b_term": "dispersed information not aggregated into prices or collective decisions",
          "note": "Hayek's knowledge problem in reverse: decentralised information systems can fail to aggregate information"
        },
        {
          "field_a_term": "cascade fragility (small shock reverses direction)",
          "field_b_term": "sudden reversal of market momentum, fashion, or political consensus",
          "note": "A single high-credibility actor (central banker, celebrity endorser) can reverse a cascade instantly"
        },
        {
          "field_a_term": "observational vs. social learning",
          "field_b_term": "copying actions vs. sharing private information",
          "note": "Social media 'likes' are action signals (cascade-prone); product reviews sharing information reduce herding"
        },
        {
          "field_a_term": "long cascade (all agents herd)",
          "field_b_term": "winner-takes-all market outcome independent of product quality",
          "note": "First-mover advantage and network effects can lock in an inferior product via cascades"
        },
        {
          "field_a_term": "cascade-breaking mechanism (visible dissenter)",
          "field_b_term": "importance of minority voices and independent analysis in deliberative institutions",
          "note": "Devil's advocate roles in committees and deliberate disclosure of dissent help prevent cascade lock-in"
        }
      ],
      "references": [
        {
          "doi": "10.1086/261849",
          "note": "Bikhchandani, Hirshleifer & Welch (1992) J Polit Econ 100:992 — information cascades"
        },
        {
          "doi": "10.2307/2118364",
          "note": "Banerjee (1992) Q J Econ 107:797 — simple model of herd behaviour"
        },
        {
          "note": "Welch (1992) J Finance 47:695 — sequential sales and IPO cascades"
        },
        {
          "note": "Chamley (2004) Rational Herds — comprehensive textbook treatment"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-social-science/b-information-cascades-herding.yaml"
    },
    {
      "id": "b-matching-theory-labor-markets",
      "title": "The Gale-Shapley deferred acceptance algorithm solves stable matching in O(n²) and directly describes real labor market clearing mechanisms — medical residency match, school choice, and kidney exchange — making market design a branch of applied combinatorics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Stable matching (Gale-Shapley 1962): given preference lists of n workers and n firms, the deferred acceptance (DA) algorithm produces a stable matching — one in which no worker-firm pair mutually prefer each other over their current assignments — in O(n²) steps. Worker-proposing DA is worker-optimal",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-da-mechanism-welfare-improving-redesign"
      ],
      "communication_gap": "Gale and Shapley published in the American Mathematical Monthly as a mathematical puzzle (1962). The economic significance was recognized only by Roth (1984), who connected it to real NRMP data. Mathematicians rarely read economics journals, and economists rarely read combinatorics literature. The 2012 Nobel Prize substantially raised mutual awareness but the two communities still publish in separate venues.\n",
      "translation_table": [
        {
          "field_a_term": "stable matching (no blocking pair)",
          "field_b_term": "labor market equilibrium (no mutually beneficial unsatisfied worker-firm pair)",
          "note": "Stability is the market-design analog of Nash equilibrium for matching problems"
        },
        {
          "field_a_term": "deferred acceptance algorithm, O(n²)",
          "field_b_term": "NRMP residency match, school assignment mechanism",
          "note": "Real institutions implement a mathematical algorithm — often without knowing it"
        },
        {
          "field_a_term": "strategy-proof mechanism (truthful reporting is dominant strategy)",
          "field_b_term": "incentive-compatible market design",
          "note": "DA is strategy-proof for the proposing side; Boston mechanism is not"
        },
        {
          "field_a_term": "gross substitutes condition (Kelso-Crawford)",
          "field_b_term": "condition for competitive equilibrium existence with worker complementarities",
          "note": "Violated when firms value workers as complements — equilibrium may not exist"
        },
        {
          "field_a_term": "Shapley-Shubik assignment game core",
          "field_b_term": "set of competitive equilibrium wages",
          "note": "Core = competitive equilibria — cooperative and competitive concepts coincide"
        },
        {
          "field_a_term": "monopsony (single buyer)",
          "field_b_term": "wage below marginal product, employment below efficient level",
          "note": "Predicts observed wage suppression in geographically isolated labor markets"
        }
      ],
      "references": [
        {
          "doi": "10.2307/2312726",
          "note": "Gale & Shapley (1962) Am Math Mon 69:9 — deferred acceptance algorithm"
        },
        {
          "doi": "10.1086/261272",
          "note": "Roth (1984) J Polit Econ 92:991 — NRMP as stable matching"
        },
        {
          "doi": "10.2307/1913392",
          "note": "Kelso & Crawford (1982) Econometrica 50:1483 — labor markets with complementarities"
        },
        {
          "doi": "10.1007/BF01753437",
          "note": "Shapley & Shubik (1972) Int J Game Theory 1:111 — assignment game"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-social-science/b-matching-theory-labor-markets.yaml"
    },
    {
      "id": "b-network-formation-games",
      "title": "Jackson-Wolinsky connections models translate game-theoretic network formation into mathematical equilibrium theory, revealing the price of anarchy between stable and efficient networks",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Jackson-Wolinsky (1996) connections model provides a rigorous mathematical framework for social network formation: agents form links by mutual consent, each receiving benefit δ^d (where d is network distance) minus link cost c. The efficient network is a star when c is low but the Nash-stable ne",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-braess-paradox-social-network-cascades"
      ],
      "communication_gap": "Mathematical economists publish network formation theory in journals like Econometrica and Journal of Economic Theory; sociologists study social networks in Social Networks and American Sociological Review. The mathematical results on stability and efficiency are rarely transferred to empirical sociology, and empirical network data rarely inform theoretical refinements. Game theory training is uncommon in sociology PhD programs.\n",
      "translation_table": [
        {
          "field_a_term": "graph (mathematics)",
          "field_b_term": "social network (social science)",
          "note": "Abstract graph theory provides the formal language for social network analysis"
        },
        {
          "field_a_term": "Nash equilibrium (game theory)",
          "field_b_term": "pairwise stable network (network economics)",
          "note": "No agent can profitably deviate — the social stability concept maps to Nash"
        },
        {
          "field_a_term": "price of anarchy (computer science / math)",
          "field_b_term": "coordination failure / market failure (economics)",
          "note": "Both capture the gap between individually rational and collectively optimal outcomes"
        },
        {
          "field_a_term": "Braess paradox (graph theory)",
          "field_b_term": "perverse policy effects (social science)",
          "note": "Adding capacity (roads, bridges) can worsen systemic outcomes — counterintuitive in both domains"
        },
        {
          "field_a_term": "adjacency matrix eigenvector",
          "field_b_term": "social influence / centrality (sociology)",
          "note": "Katz-Bonacich centrality is the dominant eigenvector of the adjacency matrix"
        }
      ],
      "references": [
        {
          "doi": "10.1006/jeth.1996.0108",
          "note": "Jackson & Wolinsky (1996). A Strategic Model of Social and Economic Networks. J Econ Theory 71:44."
        },
        {
          "doi": "10.1111/1468-0262.00155",
          "note": "Bala & Goyal (2000). A Noncooperative Model of Network Formation. Econometrica 68:1181."
        },
        {
          "note": "Braess (1968). Über ein Paradoxon aus der Verkehrsplanung. Unternehmensforschung 12:258."
        },
        {
          "doi": "10.1016/j.jeconom.2009.04.006",
          "note": "Bramoulle, Djebbari & Fortin (2009). Identification of peer effects through social networks. J Econom 150:41."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-social-science/b-network-formation-games.yaml"
    },
    {
      "id": "b-network-formation-graph-theory",
      "title": "Strategic network formation (Jackson-Wolinsky pairwise stability) connects graph theory to social science: agents form links based on cost-benefit calculations, generating small-world and scale-free topologies from rational decisions, with efficient networks provably different from stable networks due to the tension between individual incentives and social welfare.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "STRATEGIC NETWORK FORMATION (Jackson & Wolinsky 1996): Agents form links g_ij ∈ {0,1} by mutual consent. Payoff to agent i:\n\n  u_i(g) = Σⱼ δ^d(i,j) - Σⱼ: g_ij=1 c\n\nwhere δ ∈ (0,1) = decay factor with distance d(i,j), c = link cost. Pairwise stable network: no agent wants to delete a link and no two ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-network-topology-innovation-diffusion"
      ],
      "communication_gap": "Graph theorists (mathematics, CS) and social network analysts (sociology, economics) developed largely separate literatures. Jackson's economic network formation theory explicitly bridges these but requires fluency in both game theory and graph theory. The Barabási-Albert model from physics is widely used in social science without connecting to the strategic formation literature.\n",
      "translation_table": [
        {
          "field_a_term": "pairwise stable network (economics/game theory)",
          "field_b_term": "Nash equilibrium of link formation game",
          "note": "Pairwise stability is stronger than Nash — requires no bilateral deviations, not just unilateral"
        },
        {
          "field_a_term": "Erdős-Rényi phase transition (graph theory)",
          "field_b_term": "emergence of giant component (social network)",
          "note": "Giant component appears at p = 1/n — threshold at which a connected rumour/disease spreading path exists across the network"
        },
        {
          "field_a_term": "degree distribution P(k) (graph theory)",
          "field_b_term": "social influence distribution (social science)",
          "note": "Power-law degree distribution implies most influence concentrated in few hubs"
        },
        {
          "field_a_term": "clustering coefficient (graph theory)",
          "field_b_term": "bonding social capital (sociology/economics)",
          "note": "Redundant local connections = high clustering = bonding capital; Burt's structural holes = low clustering = bridging capital"
        }
      ],
      "references": [
        {
          "note": "Erdős & Rényi (1959) Publ Math Debrecen 6:290 — random graph theory"
        },
        {
          "doi": "10.1038/30918",
          "note": "Watts & Strogatz (1998) Nature 393:440 — small world networks"
        },
        {
          "doi": "10.1016/S0022-0531(96)90030-6",
          "note": "Jackson & Wolinsky (1996) J Econ Theory 71:44 — strategic network formation"
        },
        {
          "doi": "10.1126/science.286.5439.509",
          "note": "Barabási & Albert (1999) Science 286:509 — preferential attachment"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-social-science/b-network-formation-graph-theory.yaml"
    },
    {
      "id": "b-optimal-transport-economic-geography",
      "title": "Optimal transport theory (Kantorovich) and economic geography (Krugman core-periphery model) share the same mathematical structure ΓÇö spatial allocation of economic activity follows transport cost minimization, with bifurcations determining whether manufacturing concentrates or disperses.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Kantorovich's optimal transport problem (minimize transport cost to move goods from producers to consumers) and Krugman's (1991) new economic geography share deep mathematical structure. Krugman's core-periphery model: two regions, two sectors (agriculture, manufacturing). Centripetal forces ΓÇö for",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-optimal-transport-determines-city-structure-spatial-equilibrium"
      ],
      "communication_gap": "Mathematical optimal transport theorists (Villani, Brenier, McCann) rarely interact with empirical trade economists or economic geographers. The gravity model and core-periphery model were developed in economics without awareness of the optimal transport literature; the mathematical connections were made later (Galichon 2016). Urban economists who use discrete choice methods rarely engage with continuous optimal transport geometry.\n",
      "translation_table": [
        {
          "field_a_term": "Monge-Kantorovich transport map",
          "field_b_term": "optimal spatial sorting of workers and firms across locations",
          "note": "the equilibrium spatial assignment maximizes welfare subject to transport costs"
        },
        {
          "field_a_term": "iceberg transport cost ╧ä (fraction surviving transit)",
          "field_b_term": "transport cost parameterization in Dixit-Stiglitz-Krugman model",
          "note": "iceberg formulation makes transport costs proportional to value ΓÇö tractable"
        },
        {
          "field_a_term": "Wasserstein distance W_p(╬╝, ╬╜)",
          "field_b_term": "spatial discrepancy between two city's economic distributions",
          "note": "WΓéé distance between housing price distributions summarizes spatial inequality differences"
        },
        {
          "field_a_term": "bifurcation at critical ╧ä_c (Krugman catastrophe)",
          "field_b_term": "phase transition in spatial economic equilibrium",
          "note": "below ╧ä_c, full agglomeration is only stable equilibrium; history determines which region wins"
        },
        {
          "field_a_term": "Wasserstein barycenter of multiple distributions",
          "field_b_term": "average spatial economic distribution; regional policy counterfactuals",
          "note": "barycenter minimizes total Wasserstein distance; useful for interpolating between city types"
        }
      ],
      "references": [
        {
          "doi": "10.1086/261763",
          "note": "Krugman (1991) Increasing returns and economic geography; J Polit Econ 99:483"
        },
        {
          "doi": "10.1257/aer.93.1.170",
          "note": "Anderson & van Wincoop (2003) Gravity with gravitas ΓÇö a solution to the border puzzle; Am Econ Rev 93:170"
        },
        {
          "note": "Galichon (2016) Optimal Transport Methods in Economics; Princeton University Press"
        },
        {
          "doi": "10.1257/aer.98.1.2008",
          "note": "Combes et al. (2008) Spatial wage disparities ΓÇö sorting matters!; Am Econ Rev 98:2008"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/mathematics-social-science/b-optimal-transport-economic-geography.yaml"
    },
    {
      "id": "b-replicator-dynamics-ess",
      "title": "The replicator equation — governing strategy frequency evolution in evolutionary games — is formally equivalent to Fisher's selection equation in population genetics, Lotka-Volterra predator-prey dynamics, and chemical reaction kinetics, creating a unified dynamical framework spanning biology, mathematics, economics, and social science.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The replicator equation (Taylor & Jonker 1978): ẋᵢ = xᵢ[fᵢ(x) - φ(x)], where xᵢ is the frequency of strategy i, fᵢ(x) = Σⱼaᵢⱼxⱼ is the fitness of strategy i (given payoff matrix A), and φ(x) = Σᵢxᵢfᵢ(x) is the mean fitness of the population. The equation simply says: a strategy grows in frequency if",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-replicator-dynamics-ess-institutional-design"
      ],
      "communication_gap": "Taylor & Jonker published in Mathematical Biosciences (applied mathematics); Maynard Smith published in Nature and in game theory monographs (biology). The connection to economics (Nash equilibria) was recognised slowly: Nash (1950) did not know about evolution, and biologists did not read economics. The Lotka-Volterra equivalence (Hofbauer 1981, in Journal of Mathematical Biology) was proven in a specialised mathematical biology journal that economists and physicists rarely read. The Price equation connecting to statistics (covariance) was published in Nature (1970) but rarely cited outside evolutionary biology for 20 years. Social scientists applying replicator dynamics to cultural evolution (Cavalli-Sforza, Boyd & Richerson) developed largely independently of the mathematical biology literature.\n",
      "translation_table": [
        {
          "field_a_term": "strategy frequency xᵢ",
          "field_b_term": "allele frequency in population genetics / species abundance in ecology"
        },
        {
          "field_a_term": "payoff matrix A (fitness of i against j)",
          "field_b_term": "Lotka-Volterra competition matrix / chemical stoichiometry matrix"
        },
        {
          "field_a_term": "mean fitness φ(x) = Σxᵢfᵢ",
          "field_b_term": "mean fitness (Fisher) / normalisation in chemical concentration space"
        },
        {
          "field_a_term": "Nash equilibrium (best response fixed point)",
          "field_b_term": "stable allele frequency / Lotka-Volterra coexistence equilibrium"
        },
        {
          "field_a_term": "evolutionarily stable strategy (ESS)",
          "field_b_term": "asymptotically stable equilibrium of selection dynamics"
        },
        {
          "field_a_term": "rock-paper-scissors game (no ESS, limit cycle)",
          "field_b_term": "predator-prey oscillation (Lotka-Volterra cycle) / competitive exclusion cycle"
        },
        {
          "field_a_term": "Price equation ΔZ̄ = Cov(w,z)/w̄",
          "field_b_term": "Fisher's fundamental theorem / KL-divergence minimisation"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0025-5564(78)90077-9",
          "note": "Taylor & Jonker (1978) Math Biosci 40:145 — replicator equation"
        },
        {
          "note": "Maynard Smith (1982) Evolution and the Theory of Games. Cambridge University Press"
        },
        {
          "note": "Hofbauer & Sigmund (1998) Evolutionary Games and Population Dynamics. Cambridge"
        },
        {
          "note": "Sandholm (2011) Population Games and Evolutionary Dynamics. MIT Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-social-science/b-replicator-dynamics-ess.yaml"
    },
    {
      "id": "b-spatial-statistics-geographic-inequality",
      "title": "Tobler's first law, Moran's I spatial autocorrelation, and Kriging formalise geographic proximity effects that economic geography rediscovered independently as agglomeration externalities — Krugman's core-periphery bifurcation is a phase transition in the same spatial autocorrelation parameter space.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Spatial statistics and economic geography have independently developed formal frameworks for the same underlying phenomenon: proximity creates autocorrelation in socioeconomic outcomes, and self-reinforcing mechanisms can lock in geographic inequality. The mathematical bridge runs between Moran's I ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-krugman-bifurcation-detectable-moran-I-trajectory"
      ],
      "communication_gap": "Geostatistics developed in the mining and petroleum industry (Krige 1951, Matheron 1963) independently of spatial econometrics (Anselin 1988) and economic geography (Krugman 1991). The identity between Kriging and Gaussian process regression was not widely recognised in statistics until the 2000s (Rasmussen & Williams 2006). Economists rarely cite Moran (1950) or Tobler (1970). Regional scientists use spatial econometrics but rarely engage with geostatistical variogram theory. The Krugman bifurcation is rarely framed as a phase transition in statistical physics language, despite the identical mathematics.\n",
      "translation_table": [
        {
          "field_a_term": "Moran's I spatial autocorrelation coefficient",
          "field_b_term": "agglomeration intensity in economic geography",
          "note": "both measure the degree to which high values cluster near high values"
        },
        {
          "field_a_term": "spatial weight matrix wᵢⱼ",
          "field_b_term": "transport cost matrix Tᵢⱼ in trade models",
          "note": "both formalise the decay of interaction strength with distance"
        },
        {
          "field_a_term": "variogram γ(h) in geostatistics",
          "field_b_term": "distance decay of economic spillovers (localisation of knowledge)",
          "note": "variogram fit determines the range of spatial dependence — economic geography's knowledge spillover radius"
        },
        {
          "field_a_term": "Kriging interpolation (BLUP of unobserved locations)",
          "field_b_term": "counterfactual regional income prediction in policy evaluation",
          "note": "Kriging gives the best linear estimate of what income would be absent a policy intervention"
        },
        {
          "field_a_term": "critical transport cost T* (Krugman bifurcation)",
          "field_b_term": "spatial phase transition from dispersion to agglomeration",
          "note": "same mathematics as second-order phase transition; symmetry breaking selects the core region"
        },
        {
          "field_a_term": "spatial lag model ρ parameter",
          "field_b_term": "Tobler spatial autocorrelation in economic outcome variables",
          "note": "ρ > 0 means neighbours' income predicts own income — formalised Tobler's law"
        }
      ],
      "references": [
        {
          "doi": "10.2307/143141",
          "note": "Tobler (1970) Econ Geogr 46:234 — first law of geography"
        },
        {
          "doi": "10.1093/biomet/37.1-2.17",
          "note": "Moran (1950) Biometrika 37:17 — spatial autocorrelation statistic"
        },
        {
          "doi": "10.1086/261763",
          "note": "Krugman (1991) J Polit Econ 99:483 — increasing returns and economic geography"
        },
        {
          "note": "Anselin (1988) Spatial Econometrics. Kluwer Academic Publishers, Dordrecht"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/mathematics-social-science/b-spatial-statistics-geographic-inequality.yaml"
    },
    {
      "id": "b-graph-laplacian-manifold-learning-x-cryoem-conformational-maps",
      "title": "Graph-Laplacian manifold learning bridges spectral geometry and cryo-EM conformational landscape reconstruction.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Cryo-EM particle images sample continuous conformational variation; Laplacian eigenmaps provide a mathematically grounded coordinate system for this manifold. The bridge is strong but still partly speculative for highly heterogeneous complexes.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-laplacian-eigenmodes-improve-cryoem-conformation-clustering"
      ],
      "communication_gap": "Adjacent communities use different software stacks and validation norms, so mathematically equivalent tools are often rediscovered in parallel.",
      "translation_table": [
        {
          "field_a_term": "Graph Laplacian eigenvectors",
          "field_b_term": "Low-dimensional conformational coordinates",
          "note": "Encodes smooth transitions between particle states."
        },
        {
          "field_a_term": "Spectral gap",
          "field_b_term": "Conformation cluster separability",
          "note": "Large gaps suggest robust metastable states."
        },
        {
          "field_a_term": "Heat-kernel neighborhood graph",
          "field_b_term": "Particle similarity graph",
          "note": "Controls local geometry versus noise robustness."
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.1102826108",
          "note": "Nicolau et al. (2011) topology-based biomedical subgroup discovery."
        },
        {
          "doi": "10.1090/bull/1506",
          "note": "Carlsson (2009) topology and data overview."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/mathematics-structural-biology/b-graph-laplacian-manifold-learning-x-cryoem-conformational-maps.yaml"
    },
    {
      "id": "b-ddpm-x-accelerated-mri-inverse-reconstruction",
      "title": "Diffusion probabilistic models bridge score-based generative priors and accelerated MRI inverse reconstruction under undersampling.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy (to be empirically validated): DDPM score fields can act as learned regularizers in MRI inverse problems, replacing hand-crafted priors while preserving fidelity constraints from scanner physics.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ddpm-priors-reduce-mri-reconstruction-error-at-fixed-dose"
      ],
      "communication_gap": "Generative-model papers optimize perceptual metrics, while radiology pipelines require pathology-preserving reliability and auditability.",
      "translation_table": [
        {
          "field_a_term": "score function",
          "field_b_term": "inverse-problem regularizer gradient",
          "note": "Both guide reconstruction toward high-likelihood anatomical structure."
        },
        {
          "field_a_term": "diffusion denoising trajectory",
          "field_b_term": "iterative reconstruction refinement",
          "note": "Stepwise denoising parallels iterative consistency updates."
        },
        {
          "field_a_term": "data-consistency projection",
          "field_b_term": "k-space physics constraint",
          "note": "Reconstructions are projected back to measured k-space samples."
        }
      ],
      "references": [
        {
          "arxiv": "2006.11239",
          "note": "Denoising Diffusion Probabilistic Models."
        },
        {
          "url": "https://mridata.org/",
          "note": "fastMRI ecosystem and accelerated MRI benchmarking context."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/medical-imaging-machine-learning/b-ddpm-x-accelerated-mri-inverse-reconstruction.yaml"
    },
    {
      "id": "b-electrical-impedance-tomography-x-fisher-information-design",
      "title": "Electrical impedance tomography (EIT) inverse reconstruction quality is strongly shaped by Fisher-information geometry induced by electrode placement and drive patterns.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "EIT solves a severely ill-posed boundary-value inverse problem where measurement design can be as important as reconstruction algorithm choice. Fisher-information analysis provides a principled bridge from experimental setup (electrode geometry, current patterns) to expected parameter uncertainty an",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-fisher-information-optimized-eit-electrodes-improve-lesion-detectability"
      ],
      "communication_gap": "EIT studies typically benchmark reconstruction error post hoc, while design-theoretic analyses can predict uncertainty before data collection.\n",
      "translation_table": [
        {
          "field_a_term": "Jacobian of forward EIT map",
          "field_b_term": "Fisher-information matrix under assumed noise model",
          "note": "Sensitivity structure determines local identifiability and variance lower bounds."
        },
        {
          "field_a_term": "electrode drive/measurement protocol",
          "field_b_term": "optimal experiment design objective",
          "note": "Protocols can be optimized for lesion detectability instead of only reconstruction sharpness."
        },
        {
          "field_a_term": "regularization and prior structure",
          "field_b_term": "posterior covariance and credible-region geometry",
          "note": "Prior-informed design can focus information where clinical decisions need confidence."
        }
      ],
      "references": [
        {
          "doi": "10.1017/S0962492910000061",
          "note": "Bayesian inverse-problem framework relevant for uncertainty-aware EIT design and inference."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/medical-imaging-mathematics/b-electrical-impedance-tomography-x-fisher-information-design.yaml"
    },
    {
      "id": "b-persistent-homology-x-microscopy-noise-topology-qc",
      "title": "Persistent homology summaries bridge algebraic topology with microscopy pipelines where segmentation quality can be audited via stability of topological signal under imaging noise.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Literature-backed mapping (topological data analysis): persistence diagrams quantify stable multiscale features and their stability under bounded geometric noise; speculative analogy for deployment (requires validation per modality): persistence-based QC metrics may flag segmentation failures when b",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-multiscale-filtration-persistence-improves-microscopy-segmentation-qc"
      ],
      "communication_gap": "Topology papers emphasize asymptotic stability while microscopy labs rely on pixel-wise metrics with decades of institutional precedent.",
      "translation_table": [
        {
          "field_a_term": "filtration parameter (scale)",
          "field_b_term": "smoothing kernel width / PSF scale",
          "note": "Scale choices interact with optics-limited features."
        },
        {
          "field_a_term": "bottleneck distance between diagrams",
          "field_b_term": "QC deviation score versus reference template",
          "note": "Stability theory motivates distance thresholds only under explicit sampling assumptions."
        },
        {
          "field_a_term": "spurious short intervals",
          "field_b_term": "noise-induced topological artifacts",
          "note": "Thresholding heuristics remain modality dependent."
        }
      ],
      "references": [
        {
          "doi": "10.1145/997817.997855",
          "note": "Stability of persistence diagrams (foundational stability result motivating noise-aware QC language)."
        }
      ],
      "last_reviewed": "2026-05-09",
      "file": "cross-domain/medical-imaging-mathematics/b-persistent-homology-x-microscopy-noise-topology-qc.yaml"
    },
    {
      "id": "b-bayesian-inverse-imaging-x-uncertainty-quantification",
      "title": "Bayesian inverse imaging translates PDE-constrained reconstruction into posterior uncertainty maps, bridging deterministic regularization and statistical calibration.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Many imaging reconstructions solve ill-posed inverse problems with hand-tuned penalties, while Bayesian inverse methods place priors on latent fields and infer posterior distributions that expose uncertainty, not only point estimates. This creates a direct bridge between classical regularization and",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-hierarchical-bayesian-priors-improve-imaging-inverse-coverage"
      ],
      "communication_gap": "Imaging papers often report reconstruction quality metrics only, while UQ work emphasizes posterior validity; practitioners need both in a single operational protocol.\n",
      "translation_table": [
        {
          "field_a_term": "Tikhonov/TV regularization parameter",
          "field_b_term": "prior precision and structure in Bayesian models",
          "note": "Hyperpriors convert fixed tuning into inferable uncertainty scales."
        },
        {
          "field_a_term": "inverse problem non-uniqueness",
          "field_b_term": "posterior multimodality and credible sets",
          "note": "Identifiability limits become explicit rather than hidden in a single reconstruction."
        },
        {
          "field_a_term": "forward-model mismatch",
          "field_b_term": "hierarchical likelihood error models",
          "note": "Model discrepancy can be propagated to voxelwise uncertainty."
        }
      ],
      "references": [
        {
          "doi": "10.1017/S0962492910000061",
          "note": "Stuart (2010), Bayesian inverse problems: foundations and algorithmic implications."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/medical-imaging-statistics/b-bayesian-inverse-imaging-x-uncertainty-quantification.yaml"
    },
    {
      "id": "b-transformer-attention-x-longitudinal-ehr-reasoning",
      "title": "Transformer attention bridges sequence transduction and longitudinal EHR reasoning over heterogeneous clinical events.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy (to be empirically validated): self-attention can unify sparse longitudinal clinical events into context-aware risk representations similarly to flexible sequence transduction in language modeling.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-transformer-temporal-attention-improves-ehr-risk-stratification"
      ],
      "communication_gap": "NLP success metrics do not directly transfer to clinical utility, calibration, and interpretability requirements.",
      "translation_table": [
        {
          "field_a_term": "token embedding",
          "field_b_term": "clinical event representation",
          "note": "Labs, meds, and diagnoses become event tokens with temporal metadata."
        },
        {
          "field_a_term": "self-attention weight",
          "field_b_term": "contextual clinical relevance score",
          "note": "Weights reflect conditional importance under prediction targets."
        },
        {
          "field_a_term": "positional encoding",
          "field_b_term": "irregular temporal-gap encoding",
          "note": "Time-aware encodings retain event order and spacing."
        }
      ],
      "references": [
        {
          "arxiv": "1706.03762",
          "note": "Attention Is All You Need."
        },
        {
          "url": "https://www.healthit.gov/topic/scientific-initiatives/precision-medicine/electronic-health-records",
          "note": "EHR infrastructure context."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/medicine-machine-learning/b-transformer-attention-x-longitudinal-ehr-reasoning.yaml"
    },
    {
      "id": "b-diffusion-mri-x-effective-medium-tortuosity",
      "title": "Diffusion MRI and effective-medium physics meet in tortuosity models: water diffusion in tissue is treated as transport through a heterogeneous, restricted medium whose apparent diffusion encodes geometry, barriers, and compartment exchange.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The bridge maps MRI-derived apparent diffusion to effective transport parameters, but it is not a direct microscope of tissue microstructure. Identifiability depends on acquisition protocol, model assumptions, and validation against phantoms or histology.\n",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-multi-shell-dmri-estimates-track-phantom-tortuosity"
      ],
      "communication_gap": "Clinicians read diffusion metrics as imaging biomarkers, while physicists emphasize inverse-problem nonuniqueness and effective-medium assumptions.\n",
      "translation_table": [
        {
          "field_a_term": "apparent diffusion coefficient or diffusion tensor",
          "field_b_term": "effective diffusivity in heterogeneous media",
          "note": "Both summarize transport."
        },
        {
          "field_a_term": "restricted diffusion in axons or pores",
          "field_b_term": "tortuosity and obstacle geometry",
          "note": "Geometry is inferred indirectly."
        },
        {
          "field_a_term": "multi-shell acquisition",
          "field_b_term": "length-scale-dependent transport probe",
          "note": "Protocol choice affects identifiability."
        }
      ],
      "references": [
        {
          "doi": "10.1016/S0006-3495(94)80775-1",
          "note": "Basser, Mattiello and LeBihan (1994) diffusion tensor MRI framework."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/medicine-physics/b-diffusion-mri-x-effective-medium-tortuosity.yaml"
    },
    {
      "id": "b-radiation-biophysics-let",
      "title": "The biological effectiveness of ionising radiation — from DNA strand break probability to tumour control — is quantitatively predicted by the Bethe-Bloch stopping power formula: the linear energy transfer (LET) framework bridges quantum electrodynamics track structure to radiobiological effectiveness (RBE) and clinical tumour control probability (TCP) in proton and heavy-ion cancer therapy.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Bethe-Bloch formula (Bethe 1930, Bloch 1933) gives the mean energy loss per unit path length for a charged particle traversing matter:\n  -dE/dx = (4πe⁴z²N_A Z)/(m_e v² A) × [ln(2m_e v²/I) - ln(1-β²) - β²]\nwhere z is the projectile charge, v its velocity, Z/A the atomic number/mass ratio of the a",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-microdosimetry-dna-cluster-repair-threshold"
      ],
      "communication_gap": "The Bethe-Bloch formula is a standard result of quantum electrodynamics and nuclear physics, taught in particle physics courses. Radiation biologists who study DNA damage and cell survival typically have little training in particle physics and use LET as an empirical parameter rather than deriving it from Bethe-Bloch. Radiation oncologists use clinical models (LQ, TCP, NTCP) with no exposure to the underlying track-structure physics. Conversely, physicists developing next-generation particle therapy accelerators rarely engage with the molecular radiobiology literature. The chain from QED to clinical outcome exists but is rarely traversed as a whole.\n",
      "translation_table": [
        {
          "field_a_term": "Stopping power -dE/dx (Bethe-Bloch formula)",
          "field_b_term": "Linear Energy Transfer LET (keV/μm)",
          "note": "LET = -dE/dx restricted to local energy deposition; delta-ray cutoff distinguishes LET from stopping power"
        },
        {
          "field_a_term": "Bragg peak (1/v² increase near particle stop)",
          "field_b_term": "Distal dose peak in clinical proton/carbon therapy",
          "note": "The Bragg peak allows tumour-localised dose delivery — the primary clinical advantage"
        },
        {
          "field_a_term": "Particle track structure (radial dose profile)",
          "field_b_term": "Spatial pattern of DNA double-strand breaks",
          "note": "Dense track core → clustered DSBs → unrepairable lethal lesions at high LET"
        },
        {
          "field_a_term": "Relative biological effectiveness RBE",
          "field_b_term": "Dose-equivalent biological potency of high-LET vs X-ray",
          "note": "RBE peaks at LET ≈ 100-200 keV/μm; predicted by TDRA model from track structure"
        },
        {
          "field_a_term": "Linear-quadratic survival model S = exp(-αD - βD²)",
          "field_b_term": "Tumour cell killing probability as a function of radiation dose",
          "note": "α/β ratio encodes LET-dependent repair capacity; high LET → α dominates (β → 0)"
        },
        {
          "field_a_term": "Tumour control probability TCP = (1-S)^N",
          "field_b_term": "Clinical probability of local tumour control",
          "note": "Links Bethe-Bloch track physics to measurable clinical endpoint"
        },
        {
          "field_a_term": "Normal tissue complication probability NTCP",
          "field_b_term": "Probability of radiation-induced side effects in healthy organs",
          "note": "Minimised by exploiting the Bragg peak dose localisation from Bethe-Bloch physics"
        }
      ],
      "references": [
        {
          "note": "Bethe (1930) Ann Phys 5:325 — Zur Theorie des Durchgangs schneller Korpuskularstrahlen durch Materie; original stopping power formula",
          "url": "https://doi.org/10.1002/andp.19303970303"
        },
        {
          "note": "Bloch (1933) Z Phys 81:363 — extension of Bethe formula to high velocities",
          "url": "https://doi.org/10.1007/BF01344553"
        },
        {
          "doi": "10.1016/S0960-9776(09)70024-4",
          "note": "Kellerer & Rossi (1972) Curr Top Radiat Res Q 8:85 — Theory of Dual Radiation Action; TDRA model connecting track structure to RBE"
        },
        {
          "note": "ICRU Report 49 (1993) — Stopping Powers and Ranges for Protons and Alpha Particles; authoritative Bethe-Bloch tabulations for medical physics",
          "url": "https://www.icru.org/report/stopping-powers-and-ranges-for-protons-and-alpha-particles-report-49/"
        },
        {
          "doi": "10.1259/bjr/57185823",
          "note": "Bentzen & Joiner (2009) in Basic Clinical Radiobiology — linear-quadratic model and clinical fractionation; α/β ratios for tumour and normal tissues"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/medicine-physics/b-radiation-biophysics-let.yaml"
    },
    {
      "id": "b-renewal-processes-x-hospital-readmission-burst-modeling",
      "title": "Renewal and self-exciting process models bridge stochastic event timing and hospital readmission burst forecasting.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Readmission clusters can be represented with renewal kernels and self-excitation terms to separate baseline chronic risk from post-discharge contagion-like cascades.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-self-exciting-renewal-models-improve-readmission-burst-forecasting"
      ],
      "communication_gap": "Communities use different terminology and validation conventions, masking transferable method equivalence.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1093/biomet/58.1.83",
          "note": "Hawkes process foundations for self-excitation."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/medicine-statistics/b-renewal-processes-x-hospital-readmission-burst-modeling.yaml"
    },
    {
      "id": "b-atmospheric-convection-lorenz-chaos",
      "title": "Lorenz derived his famous chaotic attractor from a three-mode truncation of the Navier-Stokes equations for Rayleigh-Benard convection, making atmospheric convection the physical origin of deterministic chaos and the butterfly effect in weather prediction.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Lorenz (1963) truncated the Oberbeck-Boussinesq equations for thermal convection in a fluid layer heated from below to three Fourier modes (X, Y, Z), obtaining dX/dt = sigma*(Y-X), dY/dt = X*(r-Z)-Y, dZ/dt = X*Y - b*Z; for sigma = 10, b = 8/3, r = 28 this system exhibits a strange attractor with pos",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-lorenz-attractor-seasonal-forecast-skill"
      ],
      "communication_gap": "Meteorologists develop weather forecasting systems empirically while dynamical systems theorists analyze attractors abstractly; after Lorenz (1963) the two communities developed separately, and modern operational weather forecasting still rarely uses the geometric attractor framework from dynamical systems theory explicitly, despite its fundamental relevance.\n",
      "translation_table": [
        {
          "field_a_term": "Rayleigh number Ra = g*alpha*DeltaT*H^3/(nu*kappa) (fluid mechanics)",
          "field_b_term": "control parameter r = Ra/Ra_c in Lorenz equations (dynamical systems)",
          "note": "Convection onset at Ra_c; chaos appears at r ~ 24.74 in the Lorenz model"
        },
        {
          "field_a_term": "convective overturning cells (meteorology)",
          "field_b_term": "trajectory on the Lorenz strange attractor (dynamical systems)",
          "note": "The two lobes of the Lorenz attractor correspond to the two possible convective circulation directions"
        },
        {
          "field_a_term": "weather predictability horizon (meteorology)",
          "field_b_term": "inverse of maximum Lyapunov exponent 1/lambda_1 (dynamical systems)",
          "note": "Lyapunov exponent sets the error doubling time; finite precision limits forecast horizon"
        },
        {
          "field_a_term": "ensemble weather forecasting (meteorology)",
          "field_b_term": "probability distribution evolution on attractor (dynamical systems)",
          "note": "Ensemble spread tracks the divergence of nearby trajectories on the chaotic attractor"
        }
      ],
      "references": [
        {
          "doi": "10.1175/1520-0469(1963)020<0130:DNF>2.0.CO;2",
          "note": "Lorenz (1963) - deterministic nonperiodic flow; original Lorenz attractor paper"
        },
        {
          "doi": "10.1175/1520-0469(1969)026<0636:APPF>2.0.CO;2",
          "note": "Lorenz (1969) - atmospheric predictability as revealed by naturally occurring analogues"
        },
        {
          "doi": "10.1175/BAMS-D-13-00231.1",
          "note": "Palmer (2014) - towards probabilistic earth-system modelling using ensemble forecasting"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/meteorology-dynamical-systems/b-atmospheric-convection-lorenz-chaos.yaml"
    },
    {
      "id": "b-gut-microbiome-ecological-succession",
      "title": "The human gut microbiome assembles and recovers from perturbation (antibiotics, diet) following the same ecological succession rules as macro-ecosystems, with priority effects, keystone species, and alternative stable states.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Gut microbial community assembly follows Lotka-Volterra competition dynamics: early colonizers modify the environment (pH, oxygen, metabolites) to facilitate or inhibit later arrivals (facilitation/inhibition succession models). After antibiotic perturbation, recovery trajectories show priority effe",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-fecal-transplant-alternative-stable-state-reset"
      ],
      "communication_gap": "Microbiologists and macro-ecologists rarely collaborate despite sharing identical mathematical frameworks; the emerging field of microbial ecology uses ecological theory but often reinvents tools already available in classical ecology; clinical microbiologists remain largely unaware of tipping-point dynamics from ecosystem ecology.\n",
      "translation_table": [
        {
          "field_a_term": "microbial community composition after antibiotics (microbiology)",
          "field_b_term": "post-disturbance successional trajectory (ecology)",
          "note": "Both follow deterministic assembly rules overlaid with stochastic colonization order"
        },
        {
          "field_a_term": "keystone microbe (e.g., Akkermansia muciniphila) (microbiology)",
          "field_b_term": "keystone species / ecosystem engineer (ecology)",
          "note": "Disproportionate community-structuring effect relative to abundance in both systems"
        },
        {
          "field_a_term": "dysbiosis / alternative microbiome state (microbiology)",
          "field_b_term": "alternative stable state / tipping point (ecology)",
          "note": "Both are multiple attractors in Lotka-Volterra state space separated by unstable equilibria"
        },
        {
          "field_a_term": "bacteriocin production / competitive exclusion (microbiology)",
          "field_b_term": "allelopathy / interference competition (ecology)",
          "note": "Direct inhibition mechanisms have the same mathematical effect on coexistence conditions"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature11550",
          "note": "Lozupone et al. (2012) — diversity, stability and resilience of the human gut microbiota"
        },
        {
          "doi": "10.1016/j.cell.2012.10.038",
          "note": "Sonnenburg & Fischbach (2011) — community health and the human microbiome"
        },
        {
          "doi": "10.1038/s41559-021-01428-w",
          "note": "Gould et al. (2018) — microbiome assembly follows ecological succession rules"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/microbiology-ecology/b-gut-microbiome-ecological-succession.yaml"
    },
    {
      "id": "b-microbe-mineral-geochemical-cycling",
      "title": "Microbial communities at mineral surfaces catalyze geochemical cycling reactions (iron, sulfur, carbon, phosphorus) at rates orders of magnitude faster than abiotic processes, functioning as biological electron-transfer mediators that control global elemental budgets\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Microorganisms accelerate mineral dissolution and precipitation by producing organic acids, siderophores, and extracellular electron shuttles that lower activation energies for mineral surface reactions; iron-reducing bacteria (Geobacter, Shewanella) transfer electrons to Fe(III) mineral surfaces at",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-microbial-iron-reduction-sediment-carbon-preservation"
      ],
      "communication_gap": "Microbiologists study cellular metabolism and community ecology while geochemists model element fluxes and mineral equilibria; quantitative integration of microbial metabolic rates into geochemical reactive transport models remains limited by the difficulty of measuring in situ microbial activity.\n",
      "translation_table": [
        {
          "field_a_term": "microbial electron donor/acceptor coupling (microbiology)",
          "field_b_term": "coupled redox half-reaction at mineral surface (geochemistry)",
          "note": "Microbes couple H2/acetate oxidation to Fe(III)/SO4 reduction, catalyzing geochemical redox disequilibria"
        },
        {
          "field_a_term": "siderophore secretion for iron acquisition (microbiology)",
          "field_b_term": "ligand-promoted dissolution of iron oxide minerals (geochemistry)",
          "note": "Siderophores form soluble Fe(III) complexes that detach from mineral surfaces, increasing dissolution rate"
        },
        {
          "field_a_term": "biofilm-mineral interface (microbiology)",
          "field_b_term": "reactive mineral surface modified by organic coatings (geochemistry)",
          "note": "Biofilm EPS alters mineral surface chemistry, wettability, and reactivity; dissolution rates differ under biofilm vs. abiotic conditions"
        },
        {
          "field_a_term": "microbial community metabolic flux (microbiology)",
          "field_b_term": "geochemical reaction rate in a biogeochemical model (geochemistry)",
          "note": "Microbial population dynamics control the rate and direction of geochemical transformations in sediments"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1154529",
          "note": "Lovley (2008) - extracellular electron transfer: wires, capacitors, iron lungs and more"
        },
        {
          "doi": "10.1038/nature03051",
          "note": "Banfield et al. (2005) - the importance of microbial diversity in biogeochemical cycling"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/microbiology-geochemistry/b-microbe-mineral-geochemical-cycling.yaml"
    },
    {
      "id": "b-antibiotic-tolerance-persister-switching",
      "title": "Antibiotic tolerance in bacterial biofilms arises from phenotypic switching to a metabolically dormant persister state: the switching dynamics are a two-state stochastic process (ON-OFF) with memory, mathematically equivalent to a Markov-modulated Poisson process that determines the size and persistence of the tolerant subpopulation.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Persisters are rare bacterial cells (~10^{-5} of population) that survive antibiotic killing not through resistance (heritable genetic change) but through tolerance (transient physiological dormancy). Balaban et al. (2004) showed that persister formation follows a stochastic switching model: individ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Microbiologists describe persister formation mechanistically (toxin-antitoxin modules, SOS response) without fitting quantitative stochastic models; applied mathematicians studying two-state Markov chains rarely consider bacterial persistence as a model system. Clinical implications (persister-mediated recurrent infection) require both quantitative modelling and microbiology.\n",
      "translation_table": [
        {
          "field_a_term": "two-state Markov chain (mathematics)",
          "field_b_term": "normal-to-persister and persister-to-normal switching (microbiology)",
          "note": "Switching rates alpha (to persister) and beta (to normal) fully characterise persister dynamics"
        },
        {
          "field_a_term": "stationary distribution pi = alpha/(alpha+beta) (mathematics)",
          "field_b_term": "steady-state persister fraction (~10^{-5} to 10^{-3}) (microbiology)",
          "note": "Persister fraction set by switching rate ratio; controllable by stress conditions"
        },
        {
          "field_a_term": "biphasic exponential decay (mathematics)",
          "field_b_term": "antibiotic killing curve with fast and slow phases (microbiology)",
          "note": "Phase I: normal cells die at rate k_kill; Phase II: persisters die at rate ~ 0 until resuscitation"
        },
        {
          "field_a_term": "stochastic fluctuations in gene expression (mathematics)",
          "field_b_term": "noise-driven persister formation independent of external signals (microbiology)",
          "note": "Toxin-antitoxin (TA) module noise drives stochastic dormancy; not triggered by stress alone"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1099538",
          "note": "Balaban et al. (2004) - bacterial persistence as a phenotypic switch; foundational stochastic model"
        },
        {
          "doi": "10.1038/nrmicro2543",
          "note": "Lewis (2010) - persister cells; annual review"
        },
        {
          "doi": "10.1016/j.cell.2011.02.043",
          "note": "Maisonneuve & Gerdes (2014) - molecular mechanisms underlying bacterial persisters"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/microbiology-mathematics/b-antibiotic-tolerance-persister-switching.yaml"
    },
    {
      "id": "b-lotka-volterra-competition-x-phage-bacteria-chemostat-control",
      "title": "Lotka-Volterra competition dynamics offer a control-theoretic bridge for phage-bacteria chemostat regulation.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Lotka-Volterra competition dynamics offer a control-theoretic bridge for phage-bacteria chemostat regulation.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-lotka-volterra-informed-feedback-control-delays-phage-resistance-dominance"
      ],
      "communication_gap": "The two communities use different notation, benchmarks, and publication venues, which obscures transferable mathematical structure.",
      "translation_table": [],
      "references": [
        {
          "url": "https://pmc.ncbi.nlm.nih.gov/articles/PMC2603284/",
          "note": "Phage-bacteria dynamics and resistance."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/microbiology-mathematics/b-lotka-volterra-competition-x-phage-bacteria-chemostat-control.yaml"
    },
    {
      "id": "b-sindy-sparse-discovery-x-host-pathogen-dynamics",
      "title": "Sparse governing-equation discovery links dynamical-systems identification and host-pathogen interaction modeling.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: SINDy-style sparse equation discovery can recover low-dimensional host-pathogen interaction dynamics that are typically hand-specified in microbiology models.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-sindy-guided-control-policies-delay-phage-resistance-takeover"
      ],
      "communication_gap": "Microbiology studies prioritize mechanistic interpretability while sparse-identification communities optimize symbolic recovery, leaving limited shared evaluation standards.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1073/pnas.1517384113",
          "note": "Sparse Identification of Nonlinear Dynamics (SINDy)."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/microbiology-mathematics/b-sindy-sparse-discovery-x-host-pathogen-dynamics.yaml"
    },
    {
      "id": "b-music-physics-resonance",
      "title": "The perception of musical consonance and the octave equivalence of musical pitch are direct consequences of Fourier decomposition and the harmonic series — the same mathematical structure that governs resonant modes in vibrating strings, columns, and membranes — making music theory a physical application of wave superposition.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A vibrating string of length L fixed at both ends produces modes at frequencies f, 2f, 3f, 4f... — the harmonic series. This is a direct consequence of the wave equation boundary conditions (Fourier modes of a bounded domain). The same series emerges from any resonant system with simple boundary con",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Music theory is taught as a historical/descriptive discipline in conservatories without reference to the acoustic physics that explains why the rules work. Physics departments teach Fourier analysis and wave mechanics without reference to musical perception. Psychoacoustics sits between the two but is a small specialty. The bridge is known to acousticians (Helmholtz, 1863; Plomp & Levelt, 1965) and is occasionally noted in physics pedagogy, but has not been integrated into mainstream music theory education or cognitive neuroscience of music.\n",
      "translation_table": [
        {
          "field_a_term": "Harmonic series (f, 2f, 3f... — physics of resonance)",
          "field_b_term": "Overtone series (musical timbre and pitch perception)"
        },
        {
          "field_a_term": "Fourier decomposition of periodic waveform",
          "field_b_term": "Cochlear basilar membrane frequency analysis"
        },
        {
          "field_a_term": "2:1 frequency ratio (first harmonic)",
          "field_b_term": "Musical octave (perceived same pitch class)"
        },
        {
          "field_a_term": "Shared partials between complex tones",
          "field_b_term": "Consonance perception (low-beating intervals)"
        },
        {
          "field_a_term": "Integer ratio commensurability (3:2, 4:3, 5:4)",
          "field_b_term": "Perfect fifth, fourth, major third (consonant intervals)"
        },
        {
          "field_a_term": "Standing wave boundary conditions",
          "field_b_term": "Instrument acoustics (string length, tube length determine pitch)"
        },
        {
          "field_a_term": "Inharmonic spectra (bells, xylophones)",
          "field_b_term": "Non-Western instruments avoid the Western scale; inharmonicity explains why"
        }
      ],
      "references": [
        {
          "doi": "10.1121/1.1906882",
          "note": "Plomp & Levelt (1965) - Tonal consonance and critical bandwidth; psychoacoustic foundation of consonance from harmonic analysis"
        },
        {
          "doi": "10.1098/rspb.2014.0916",
          "note": "Bowling et al. (2018) - Vocal similarity predicts the relative attraction of musical chords; evolutionary connection to harmonic series"
        },
        {
          "url": "https://archive.org/details/onthesensionstof00helm",
          "note": "Helmholtz (1863) - On the Sensations of Tone; foundational acoustic-musical bridge"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/music-physics/b-music-physics-resonance.yaml"
    },
    {
      "id": "b-graph-convolution-x-transmission-network-inference",
      "title": "Graph convolution bridges relational representation learning and pathogen transmission-network inference from sparse contact data.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy (to be empirically validated): graph convolutional message passing can infer latent transmission linkage structure by integrating mobility, genomic, and contact-network signals under partial observability.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-graph-convolution-with-mobility-priors-improves-outbreak-link-recovery"
      ],
      "communication_gap": "Epidemiologic contact tracing values causal interpretability, while graph ML often emphasizes predictive performance without directionality guarantees.",
      "translation_table": [
        {
          "field_a_term": "message passing",
          "field_b_term": "risk signal propagation across contact graph",
          "note": "Node updates aggregate neighborhood evidence."
        },
        {
          "field_a_term": "graph embedding",
          "field_b_term": "latent transmission proximity",
          "note": "Embeddings encode probable epidemiologic linkage strength."
        },
        {
          "field_a_term": "edge-conditioned updates",
          "field_b_term": "heterogeneous interaction weighting",
          "note": "Edge metadata captures contact duration and context."
        }
      ],
      "references": [
        {
          "arxiv": "1609.02907",
          "note": "Semi-Supervised Classification with Graph Convolutional Networks."
        },
        {
          "url": "https://www.cdc.gov/outbreaks/index.html",
          "note": "Outbreak investigation context."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/network-science-machine-learning/b-graph-convolution-x-transmission-network-inference.yaml"
    },
    {
      "id": "b-glial-cells-brain-homeostasis",
      "title": "Glia bridge neuroscience and biology: astrocytes form the tripartite synapse (modulating transmission), microglia prune synapses via complement tagging (C1q/C3), oligodendrocytes provide metabolic support — glial dysfunction drives neurodegeneration across Alzheimer's, MS, and ALS.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Glial cells (non-neuronal brain cells) are not passive support — they are active participants in brain function and homeostasis. Three major types: (1) Astrocytes: form the tripartite synapse — astrocyte processes ensheath ~57% of synapses in rodent hippocampus. Functions: glutamate uptake (GLT-",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-complement-mediated-synapse-loss-drives-alzheimers-cognitive-decline"
      ],
      "communication_gap": "Neurophysiologists who study synaptic transmission rarely engage with the immunology of microglial complement-mediated pruning. Neurologists who manage multiple sclerosis (oligodendrocyte pathology) rarely integrate with the tripartite synapse literature. The unification of glial cell biology into a coherent functional picture of brain homeostasis is still incomplete in most neuroscience training programs.\n",
      "translation_table": [
        {
          "field_a_term": "tripartite synapse (pre + post + astrocyte)",
          "field_b_term": "three-cell functional unit replacing the classical two-cell synapse model",
          "note": "astrocyte monitors glutamate spillover and releases modulatory signals; key for plasticity"
        },
        {
          "field_a_term": "GLT-1/GLAST glutamate transporters",
          "field_b_term": "electrogenic transporters (3 Na⁺ in, 1 H⁺ in, 1 K⁺ out per glutamate)",
          "note": "prevent excitotoxicity; electrogenic transport driven by Na⁺ gradient; 80% of brain glutamate uptake"
        },
        {
          "field_a_term": "C1q-C3 complement tagging of synapses",
          "field_b_term": "molecular immune opsonization applied to neural pruning",
          "note": "same molecular pathway used for bacterial clearance; repurposed for synapse elimination in CNS development"
        },
        {
          "field_a_term": "microglial TREM2 receptor",
          "field_b_term": "Alzheimer's disease risk factor (rare variant TREM2 R47H elevates risk 3-fold)",
          "note": "TREM2 enables microglial response to lipids on damaged neurons and amyloid — haploinsufficiency impairs clearance"
        },
        {
          "field_a_term": "oligodendrocyte lactate shuttle",
          "field_b_term": "metabolic support (MCT1/MCT2 monocarboxylate transporters)",
          "note": "oligodendrocytes provide lactate to axons as metabolic fuel; axon degeneration occurs without MCT1"
        }
      ],
      "references": [
        {
          "doi": "10.1016/S0166-2236(98)01349-6",
          "note": "Araque et al. (1999) Tripartite synapses ΓÇö glia, the unacknowledged partner; Trends Neurosci 22:208"
        },
        {
          "doi": "10.1016/j.cell.2007.10.036",
          "note": "Stevens et al. (2007) The classical complement cascade mediates CNS synapse elimination; Cell 131:1164"
        },
        {
          "doi": "10.1146/annurev.immunol.021908.132612",
          "note": "Ransohoff & Perry (2009) Microglial physiology ΓÇö unique stimuli, specialized responses; Annu Rev Immunol 27:119"
        },
        {
          "doi": "10.1038/nrn2830",
          "note": "Nave (2010) Myelination and support of axonal integrity by glia; Nat Rev Neurosci 11:275"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-biology/b-glial-cells-brain-homeostasis.yaml"
    },
    {
      "id": "b-memory-reconsolidation-synaptic-plasticity",
      "title": "Memory reconsolidation—the requirement for new protein synthesis to re- stabilise a memory after retrieval—is mechanistically identical to the late-phase long-term potentiation (L-LTP) that initially encodes the memory: both require NMDA-receptor activation, CaMKII autophosphorylation, CREB-mediated transcription, and de novo synaptic protein synthesis.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Nader, Schafe & LeDoux (2000) showed that infusing the protein synthesis inhibitor anisomycin into the basolateral amygdala immediately after a conditioned-fear memory is reactivated causes amnesia for that memory, demonstrating that retrieved memories re-enter a labile state requiring restabilisati",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-reconsolidation-ampar-endocytosis-labilisation"
      ],
      "communication_gap": "The reconsolidation discovery (Nader 2000) was initially controversial within cognitive neuroscience; its molecular equivalence to L-LTP mechanisms is accepted in cellular neuroscience but the implications for therapeutic memory modification are debated. Clinical memory researchers and synaptic plasticity researchers publish in different journals.\n",
      "translation_table": [
        {
          "field_a_term": "Memory reactivation by retrieval cue",
          "field_b_term": "Re-induction of synaptic LTP by repeated stimulation",
          "note": "Both require NMDA-R activation; retrieval-induced NMDA activation triggers labilisation"
        },
        {
          "field_a_term": "Protein synthesis inhibitor (anisomycin) causing reconsolidation amnesia",
          "field_b_term": "Blockade of L-LTP protein synthesis preventing memory consolidation",
          "note": "Same pharmacological tools block both processes; same proteins required (Arc, CaMKII, BDNF)"
        },
        {
          "field_a_term": "Reconsolidation window (hours after retrieval)",
          "field_b_term": "L-LTP consolidation window (hours after induction)",
          "note": "Both windows reflect the time required for mRNA transcription and protein synthesis"
        },
        {
          "field_a_term": "Memory update after reconsolidation (incorporation of new information)",
          "field_b_term": "Synaptic weight modification during LTP restabilisation",
          "note": "Both mechanisms allow modification of existing traces; reconsolidation = update opportunity"
        }
      ],
      "references": [
        {
          "doi": "10.1038/35021052",
          "note": "Nader, Schafe & LeDoux (2000) Nature – fear memories require protein synthesis in the amygdala for reconsolidation"
        },
        {
          "doi": "10.1126/science.294.5544.1030",
          "note": "Kandel (2001) Science – the molecular biology of memory storage: a dialogue between genes and synapses"
        },
        {
          "doi": "10.1038/nrn1425",
          "note": "Nader & Hardt (2009) Nature Reviews Neuroscience – a single standard for memory: the case for reconsolidation"
        },
        {
          "doi": "10.1016/j.neuron.2014.11.010",
          "note": "Bhaskaran & Smith (2014) – AMPA receptor trafficking during reconsolidation and LTP"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-biology/b-memory-reconsolidation-synaptic-plasticity.yaml"
    },
    {
      "id": "b-neurodegeneration-protein-aggregation",
      "title": "All major neurodegenerative diseases — Parkinson's (alpha-synuclein), Alzheimer's (Abeta, tau), and prion diseases — are protein aggregation disorders with nucleation- elongation kinetics identical to protein crystallization, and they spread through neural circuits by prion-like templated misfolding.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Parkinson's disease: alpha-synuclein (SNCA gene product) misfolds from its natively unstructured form into beta-sheet-rich oligomers and then into Lewy body inclusions. The aggregation kinetics follow nucleation-elongation theory: a slow lag phase (nucleation of a critical oligomeric nucleus) follow",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-tau-propagation-circuit-connectivity-determines-staging"
      ],
      "communication_gap": "Neurodegeneration research is historically siloed by disease (Alzheimer's researchers vs. Parkinson's researchers) despite the mechanistic commonality. Protein biophysicists who study nucleation-elongation kinetics rarely collaborate with neurologists who treat patients. The prion-like framing was initially controversial among disease-specific communities. The Braak staging papers (neuropathology) took years to be absorbed by the molecular biology and drug development communities.\n",
      "translation_table": [
        {
          "field_a_term": "nucleation lag phase (critical nucleus)",
          "field_b_term": "slow initiation of Lewy body / plaque formation in early disease",
          "note": "Lag phase duration is concentration-dependent; explains decades of silent progression"
        },
        {
          "field_a_term": "elongation rate constant k_+",
          "field_b_term": "rate of amyloid fibril growth by monomer addition",
          "note": "Amenable to inhibition by small molecules or antibodies targeting fibril ends"
        },
        {
          "field_a_term": "prion-like templated misfolding",
          "field_b_term": "prion mechanism applied to Parkinson's, Alzheimer's, ALS",
          "note": "Braak staging is the anatomical manifestation of prion-like circuit propagation"
        },
        {
          "field_a_term": "amyloid cascade hypothesis (Abeta initiates tau, tau drives death)",
          "field_b_term": "upstream-downstream causal chain in disease progression",
          "note": "Hardy & Selkoe (2002) — tested by anti-Abeta immunotherapy trials"
        },
        {
          "field_a_term": "critical aggregation concentration",
          "field_b_term": "threshold alpha-synuclein or Abeta concentration for aggregation onset",
          "note": "Explains why gene dosage (triplication vs. duplication) matters for age of onset"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1072994",
          "note": "Hardy & Selkoe (2002) Science 297:353 — amyloid cascade hypothesis"
        },
        {
          "doi": "10.1007/BF00308809",
          "note": "Braak & Braak (1991) Acta Neuropathol 82:239 — Parkinson's staging"
        },
        {
          "doi": "10.1073/pnas.95.23.13363",
          "note": "Prusiner (1998) PNAS 95:13363 — prion hypothesis"
        },
        {
          "doi": "10.1038/388839a0",
          "note": "Spillantini et al. (1997) Nature 388:839 — alpha-synuclein in Lewy bodies"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-biology/b-neurodegeneration-protein-aggregation.yaml"
    },
    {
      "id": "b-neuronal-fatigue-metabolic-depletion-resource-models",
      "title": "Neuronal fatigue — the declining response of neurons during sustained stimulation — is explained by resource depletion models from biophysics: synaptic vesicle pools, ATP availability, and ion gradient rundown follow first-order depletion kinetics, creating a quantitative bridge between cellular metabolism and neural computation.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Tsodyks-Markram (TM) resource model of short-term synaptic depression: dx/dt = (1-x)/τ_rec - u·x·δ(t-t_spike) where x ∈ [0,1] is available vesicle fraction, τ_rec is recovery time constant, and u is utilization probability. The effective synaptic strength is A_eff = A·x·u, decaying during high-f",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-neural-fatigue-computational-role-temporal-filtering-adaptation"
      ],
      "communication_gap": "Computational neuroscientists using the Tsodyks-Markram model for short-term plasticity and metabolic neuroscientists studying astrocyte-neuron coupling rarely integrate their frameworks; quantitative metabolic constraints on neural firing patterns (linking fMRI BOLD signals to firing rate limits via ATP budgets) represent an active cross-disciplinary gap despite shared biophysical foundations.\n",
      "translation_table": [
        {
          "field_a_term": "synaptic depression / neural fatigue (neuroscience)",
          "field_b_term": "resource depletion in a driven dynamical system (biophysics)",
          "note": "Both follow first-order kinetics with depletion proportional to usage rate"
        },
        {
          "field_a_term": "vesicle pool size (neuroscience)",
          "field_b_term": "reservoir capacity in a resource model (biophysics)",
          "note": "Available vesicle fraction x is the dimensionless resource variable"
        },
        {
          "field_a_term": "recovery from fatigue τ_rec (neuroscience)",
          "field_b_term": "resource replenishment time constant (biophysics)",
          "note": "τ_rec ≈ 800 ms for typical synapses; sets the low-pass filter cutoff for neural transmission"
        },
        {
          "field_a_term": "ATP depletion during prolonged firing (neuroscience)",
          "field_b_term": "metabolic bottleneck / substrate limitation (biophysics)",
          "note": "Na/K-ATPase consumes ~50% of neural ATP; sustained high firing can deplete local ATP stores"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.275.5297.213",
          "note": "Tsodyks & Markram (1997) - the neural code between neocortical pyramidal neurons (TM model)"
        },
        {
          "doi": "10.1073/pnas.94.2.719",
          "note": "Markram et al. (1997) - physiology and anatomy of synaptic connections between neocortical pyramidal neurons"
        },
        {
          "doi": "10.1152/jn.00101.2004",
          "note": "Attwell & Laughlin (2001) - the energy budget for signaling in the grey matter of the brain"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-biology/b-neuronal-fatigue-metabolic-depletion-resource-models.yaml"
    },
    {
      "id": "b-retinal-waves-spontaneous-activity",
      "title": "Spontaneous correlated activity (retinal waves) in the developing retina drives Hebbian refinement of retinotopic maps in superior colliculus and lateral geniculate nucleus via activity-dependent synaptic plasticity: the spatial correlation structure of the waves encodes positional information that substitutes for visual experience before eye-opening.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Before eye-opening, retinal ganglion cells (RGCs) fire in propagating waves mediated by gap junctions (Stage I) and cholinergic amacrine cells (Stage II) that produce correlated bursts in neighbouring RGCs. The Hebb rule—\"cells that fire together wire together\"—predicts that correlated RGC pairs for",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-retinal-wave-bandwidth-map-resolution-constraint"
      ],
      "communication_gap": "Developmental neurobiologists studying retinal waves and computational neuroscientists studying Hebbian learning and map formation publish in separate journals (J. Neurosci., Neuron vs. Neural Computation, PLOS Computational Biology). The information-theoretic framing of retinal waves as a topographic code is underexplored in experimental literature.\n",
      "translation_table": [
        {
          "field_a_term": "Correlated bursting of neighbouring RGCs during a wave",
          "field_b_term": "Hebb-rule co-activation driving LTP at shared LGN target",
          "note": "Spatial correlation gradient of waves encodes retinal neighbourhood; Hebb rule reads this gradient"
        },
        {
          "field_a_term": "Retinal wave propagation velocity (~200 μm/s)",
          "field_b_term": "Spatial bandwidth of activity correlation (sets map resolution)",
          "note": "Faster waves correlate larger retinal neighbourhoods; slower waves encode finer topography"
        },
        {
          "field_a_term": "Stage II cholinergic waves (ACh-mediated)",
          "field_b_term": "Critical period analogue: ACh sets gain and spatial extent of correlation",
          "note": "β2 nicotinic receptor knockout eliminates waves and disrupts retinotopic maps"
        },
        {
          "field_a_term": "Eye-specific segregation in LGN",
          "field_b_term": "Competitive Hebbian learning: ipsi vs. contra RGC firing decorrelated",
          "note": "Eyes fire asynchronous waves; anti-correlation drives segregation into laminae"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1254927",
          "note": "Ackman et al. (2012) Science – retinal waves drive retinotopic map refinement in V1 before eye-opening"
        },
        {
          "doi": "10.1016/0092-8674(96)90606-1",
          "note": "Katz & Shatz (1996) Cell – synaptic activity and development of the visual cortex; wave review"
        },
        {
          "doi": "10.1126/science.7770778",
          "note": "Meister et al. (1991) Science – first observation of correlated spontaneous activity in retina"
        },
        {
          "doi": "10.1523/JNEUROSCI.2395-07.2007",
          "note": "Huberman et al. (2007) J. Neurosci. – beta2 nAChR knockout disrupts retinal wave correlation and retinotopy"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-biology/b-retinal-waves-spontaneous-activity.yaml"
    },
    {
      "id": "b-nociception-gate-control-spinal-circuit",
      "title": "The gate control theory of pain formalises nociceptive processing as a biophysical circuit in the spinal cord dorsal horn: large-diameter non-nociceptive (A-beta) fibres activate inhibitory interneurons that gate ascending pain signals from small-diameter (A-delta, C) fibres, making pain a dynamically regulated signal rather than a fixed-gain sensory channel.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Melzack & Wall (1965) modelled the dorsal horn as a circuit with a substantia gelatinosa (SG) interneuron that inhibits the transmission (T) cell projecting to higher brain centres. Non-nociceptive A-beta input excites SG (closes the gate); A-delta/C input inhibits SG (opens the gate). Mathematicall",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Pain clinicians know gate control theory as a narrative mechanism for TENS and SCS; biophysicists rarely model the spinal dorsal horn circuitry as an explicit signal-processing circuit with transfer functions amenable to quantitative analysis. The formal biophysical circuit formulation is not standard in clinical pain medicine curricula.\n",
      "translation_table": [
        {
          "field_a_term": "subtraction circuit / lateral inhibition (biophysics)",
          "field_b_term": "SG interneuron gate in dorsal horn (neuroscience)",
          "note": "SG excited by A-beta (large fibres) inhibits T cell; analogous to retinal lateral inhibition"
        },
        {
          "field_a_term": "conditional inhibition / AND-NOT gate (biophysics)",
          "field_b_term": "pain gating by non-nociceptive touch input (neuroscience)",
          "note": "Touch opens A-beta activity -> SG -> inhibits T cell; rubbing an injury reduces pain"
        },
        {
          "field_a_term": "biophysical circuit transfer function (biophysics)",
          "field_b_term": "pain perception as function of nociceptive vs. non-nociceptive fibre balance (neuroscience)",
          "note": "T_output = f(A_delta, C, A_beta, descending modulation); measurable by laser Doppler and evoked potentials"
        },
        {
          "field_a_term": "gain control by descending input (biophysics)",
          "field_b_term": "PAG-RVM descending opioidergic modulation of spinal gate (neuroscience)",
          "note": "Stress-induced analgesia and opioid analgesia work through descending gate-closing mechanisms"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.150.3699.971",
          "note": "Melzack & Wall (1965) - pain mechanisms; a new theory; original gate control paper"
        },
        {
          "doi": "10.1038/nrn3086",
          "note": "Todd (2010) - neuronal circuitry for pain processing in the dorsal horn"
        },
        {
          "doi": "10.1016/j.pain.2008.02.001",
          "note": "Moayedi & Davis (2013) - theories of pain; gate control vs. predictive coding"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-biophysics/b-nociception-gate-control-spinal-circuit.yaml"
    },
    {
      "id": "b-synaptic-vesicle-snare-fusion",
      "title": "Synaptic vesicle fusion is mechanically gated by SNARE complex zippering force: the ~20 pN force generated by progressive SNARE assembly drives membrane merger through a series of hemi-fusion intermediates, quantified by single-molecule force spectroscopy and simulated by coarse-grained molecular dynamics\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "SNARE complex assembly exerts a vectorial mechanical force (~14-20 pN measured by optical tweezers) that overcomes the ~50 kT energy barrier to bilayer fusion; the sequential N-to-C zippering of v-SNARE (synaptobrevin) with t-SNAREs (syntaxin/SNAP-25) is the molecular force transducer converting che",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-snare-zippering-force-gates-fusion-rate"
      ],
      "communication_gap": "Neurophysiologists measure release probability electrophysiologically while biophysicists measure SNARE forces with optical tweezers; the quantitative connection between force generation and release probability is rarely made explicit across the two communities.\n",
      "translation_table": [
        {
          "field_a_term": "synaptic vesicle priming (neuroscience)",
          "field_b_term": "SNARE complex partial assembly to force-generating intermediate (biophysics)",
          "note": "Primed vesicles have partially assembled SNARE zippers capable of completing fusion on Ca2+ signal"
        },
        {
          "field_a_term": "Ca2+-triggered exocytosis (neuroscience)",
          "field_b_term": "synaptotagmin-driven SNARE zippering completion under Ca2+ binding (biophysics)",
          "note": "Ca2+ binding to synaptotagmin relieves the clampin effect and completes SNARE zippering"
        },
        {
          "field_a_term": "readily releasable pool size (neuroscience)",
          "field_b_term": "number of fully primed SNARE complexes at active zones (biophysics)",
          "note": "RRP correlates with SNARE complex number measured by fluorescence correlation spectroscopy"
        },
        {
          "field_a_term": "neurotransmitter release probability (neuroscience)",
          "field_b_term": "probability of SNARE-driven membrane fusion per action potential (biophysics)",
          "note": "Release probability is determined by SNARE assembly state and Ca2+ sensor affinity"
        }
      ],
      "references": [
        {
          "doi": "10.1038/s41594-020-0457-x",
          "note": "Sutton lab (2020) - cryo-EM of SNARE-synaptotagmin complex at atomic resolution"
        },
        {
          "doi": "10.1126/science.aad1480",
          "note": "Gao et al. (2012) - single-molecule force measurement of SNARE zippering energy"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-biophysics/b-synaptic-vesicle-snare-fusion.yaml"
    },
    {
      "id": "b-anesthesia-consciousness-suppression",
      "title": "General anesthesia bridges neuroscience and chemistry: volatile agents potentiate GABA-A and inhibit NMDA receptors to reliably suppress consciousness, yet the Meyer-Overton lipophilicity correlation and the hard problem of consciousness remain unresolved after 125 years.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "General anesthesia requires four components: unconsciousness, amnesia, analgesia, and muscle relaxation. The chemical mechanisms are partially understood: volatile anesthetics (isoflurane, sevoflurane, desflurane) potentiate GABA-A receptors (increasing Cl⁻ conductance → neuronal hyperpolarization) ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ketamine-antidepressant-ampa-potentiation-mechanism"
      ],
      "communication_gap": "Anesthesiologists administer anesthetics clinically and focus on MAC values, cardiovascular effects, and depth monitoring (BIS). The molecular pharmacology of receptor mechanisms is a separate literature (Journal of Biological Chemistry, Neuropharmacology). Consciousness researchers rarely engage with the chemistry. The Meyer-Overton correlation has been taught as mechanistic explanation long after the evidence pointed to protein targets.\n",
      "translation_table": [
        {
          "field_a_term": "volatile anesthetic (isoflurane, sevoflurane)",
          "field_b_term": "allosteric potentiator of GABA-A receptor Cl⁻ channel",
          "note": "binds transmembrane domain of GABA-A; shifts dose-response curve leftward"
        },
        {
          "field_a_term": "MAC (minimum alveolar concentration)",
          "field_b_term": "ED₅₀ for anesthetic endpoint; comparable to Ki in pharmacology",
          "note": "species-universal measure; scales with oil:water partition coefficient"
        },
        {
          "field_a_term": "Meyer-Overton correlation",
          "field_b_term": "partition coefficient determines membrane vs. aqueous anesthetic concentration",
          "note": "correlation holds but mechanism is protein targets, not lipid bilayer bulk"
        },
        {
          "field_a_term": "ketamine (NMDA antagonist)",
          "field_b_term": "open-channel blocker of NR1/NR2B NMDA receptor",
          "note": "produces dissociative anesthesia; metabolite HNK is antidepressant via AMPA"
        },
        {
          "field_a_term": "EEG under anesthesia (burst suppression, delta oscillations)",
          "field_b_term": "network-level signature of GABA-A-mediated inhibition suppressing cortical activity",
          "note": "EEG monitoring (BIS index) tracks anesthetic depth; chemistry → network → EEG"
        }
      ],
      "references": [
        {
          "note": "Meyer (1899) Zur Theorie der Alkoholnarkose; Arch Exp Pathol Pharmakol 42:109"
        },
        {
          "doi": "10.1038/367607a0",
          "note": "Franks & Lieb (1994) Molecular and cellular mechanisms of general anaesthesia; Nature 367:607"
        },
        {
          "doi": "10.1126/science.1163506",
          "note": "Alkire et al. (2008) Consciousness and anesthesia; Science 322:876"
        },
        {
          "doi": "10.1038/nature17998",
          "note": "Zanos et al. (2016) NMDAR inhibition-independent antidepressant actions of ketamine metabolites; Nature 533:481"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-chemistry/b-anesthesia-consciousness-suppression.yaml"
    },
    {
      "id": "b-ion-channel-gating-x-metastable-rate-theory",
      "title": "Voltage-gated ion channels switch among discrete conducting states via stochastic transitions whose voltage dependence maps to energy barriers — chemical physics metastability and Kramers-type rate theory relate barrier heights and attempt frequencies to exponential transition rates — bridges molecular electrophysiology with condensed-phase reaction-rate formalisms already used for ligand gating and enzyme catalysis.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Patch-clamp dwell-time distributions for channel openings/closings inform Markov state models with voltage-dependent transition rates α(V), β(V) often modeled Arrhenius-like — identical mathematical scaffold appears in barrier-crossing theory connecting metastable wells separated by transition state",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-markov-gating-graph-consistency-with-kramers-scaling-under-voltage-clamp-protocols"
      ],
      "communication_gap": "Ion-channel biophysicists emphasize Hodgkin–Huxley empirical fits first; physical chemists emphasize rare-event sampling — modern structural biology increasingly unifies both via MD milestones yet jargon mismatch persists.\n",
      "translation_table": [
        {
          "field_a_term": "Voltage-dependent opening/closing rates α(V), β(V)",
          "field_b_term": "Arrhenius/Kramers exponential rate parameters vs barrier ΔG‡",
          "note": "Shared exponential sensitivity — mechanisms include gating charge displacement."
        },
        {
          "field_a_term": "Multi-state Markov gating schemes (inactivation chains)",
          "field_b_term": "Networks of metastable basins on complex energy landscapes",
          "note": "Graph reductions parallel Markov chain coarse-graining."
        },
        {
          "field_a_term": "Gating current measurements (charge movement prior to pore opening)",
          "field_b_term": "Transition-path sampling along polar reaction coordinates",
          "note": "Dynamics experiments constrain landscape slices coupling electrophysiology to theory."
        }
      ],
      "references": [
        {
          "doi": "10.1113/jphysiol.1952.sp004764",
          "note": "Hodgkin & Huxley (1952) — quantitative description of membrane current gating"
        },
        {
          "doi": "10.1103/RevModPhys.62.251",
          "note": "Hänggi, Talkner & Borkovec (1990) — reaction-rate theory review spanning barrier crossing"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-chemistry/b-ion-channel-gating-x-metastable-rate-theory.yaml"
    },
    {
      "id": "b-neurogenesis-growth-factor-signaling",
      "title": "Adult hippocampal neurogenesis (~700 new neurons/day in humans) is regulated by BDNF-TrkB, VEGF, and IGF-1 signaling cascades activated by exercise — providing the neurochemical mechanism for exercise antidepressant effects and SSRI-dependent neurogenesis hypothesis of depression.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Adult neurogenesis — the production of new neurons from neural stem cells in the adult brain — occurs in two primary niches: the subgranular zone (SGZ) of the hippocampal dentate gyrus and the subventricular zone (SVZ) feeding the olfactory bulb. Spalding et al. (2013) used radiocarbon ¹⁴C dating of",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-neurogenesis-requirement-ssri-antidepressant-human-evidence"
      ],
      "communication_gap": "Adult neurogenesis research spans molecular biology (growth factor signaling, published in Cell, J Neurosci), behavioral neuroscience (depression models, published in Neuropsychopharmacology, Biol Psychiatry), and psychiatry (clinical antidepressant trials, published in NEJM, Lancet Psychiatry). Basic scientists identifying BDNF-TrkB signaling pathways rarely engage with clinical trial design. Psychiatrists designing SSRI trials are largely unaware of the mechanistic neurogenesis literature. The radiocarbon dating method for neurogenesis (Spalding) requires expertise in accelerator mass spectrometry (AMS) that no single neuroscience lab possesses — creating a bottleneck in the most direct test of human neurogenesis.\n",
      "translation_table": [
        {
          "field_a_term": "BDNF-TrkB kinase signaling (neurotrophin receptor cascade)",
          "field_b_term": "tyrosine kinase receptor → intracellular second messenger cascade"
        },
        {
          "field_a_term": "PI3K-Akt pathway (pro-survival, anti-apoptotic)",
          "field_b_term": "phosphoinositide kinase cascade controlling cell fate"
        },
        {
          "field_a_term": "MAPK-ERK pathway (proliferation, differentiation)",
          "field_b_term": "mitogen-activated kinase cascade for cell cycle entry"
        },
        {
          "field_a_term": "BrdU (bromodeoxyuridine) labeling of dividing cells",
          "field_b_term": "pulse-chase experiment to track newly synthesized DNA"
        },
        {
          "field_a_term": "¹⁴C retrospective birth dating (Spalding method)",
          "field_b_term": "radiocarbon isotope pulse-chase at population level"
        },
        {
          "field_a_term": "SSRI-induced BDNF increase → neurogenesis → antidepressant",
          "field_b_term": "pharmacological target (5-HT transporter) → downstream growth factor cascade"
        },
        {
          "field_a_term": "exercise lactate → HIF-1α → BDNF/VEGF upregulation",
          "field_b_term": "metabolic signaling (oxygen-sensing pathway) → growth factor gene expression"
        }
      ],
      "references": [
        {
          "doi": "10.1016/j.cell.2013.05.004",
          "note": "Spalding et al. (2013) Cell 153:1219 — dynamics of hippocampal neurogenesis in adult humans (¹⁴C dating)"
        },
        {
          "doi": "10.1038/s41591-018-0038-z",
          "note": "Sorrells et al. (2018) Nat Med 25:554 — human hippocampal neurogenesis drops sharply in children to undetectable levels in adults (contested)"
        },
        {
          "doi": "10.1126/science.1089672",
          "note": "Castrén (2004) Science 304:529 — neurotrophins as mediators of drug effects on mood, addiction, and neuroprotection"
        },
        {
          "note": "Bhattacharya et al. (2014) Cell 157:1 — a cell biology perspective on BDNF signaling and antidepressant drug action (review)"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-chemistry/b-neurogenesis-growth-factor-signaling.yaml"
    },
    {
      "id": "b-neuropeptides-hypothalamic-control",
      "title": "Neuropeptides and Hypothalamic Control — leptin, GLP-1, AgRP/POMC circuits, oxytocin, and vasopressin integrate energy homeostasis with social and reproductive behaviour",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The hypothalamus integrates autonomic, endocrine, and behavioural functions through neuropeptide signalling circuits. Energy homeostasis centres on the arcuate nucleus (ARC): AgRP/NPY neurons (orexigenic — stimulate feeding) and POMC/CART neurons (anorexigenic — suppress feeding) form a push-pull ci",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Endocrinologists studying peripheral hormone physiology, neurobiologists studying hypothalamic circuits, and pharmacologists developing anti-obesity drugs operate with different conceptual frameworks and terminology. The explosive clinical success of GLP-1 agonists has created renewed cross-disciplinary interest, but the molecular neuroscience of hypothalamic neuropeptide circuits is still not fully integrated with metabolic medicine. Peptide drugs targeting brain receptors face blood-brain barrier challenges that are insufficiently appreciated in clinical development.\n",
      "translation_table": [
        {
          "field_a_term": "leptin (ob gene product, adipokine)",
          "field_b_term": "adiposity signal reporting fat stores to hypothalamic energy circuits",
          "note": "Leptin acts as a proportional signal: ob/ob mice (no leptin) eat ad libitum and reach 3× normal body weight"
        },
        {
          "field_a_term": "AgRP/NPY neurons (arcuate nucleus orexigenic)",
          "field_b_term": "hunger-promoting circuit activated by fasting and ghrelin",
          "note": "Optogenetic activation of AgRP neurons triggers immediate food seeking; ablation causes fatal anorexia"
        },
        {
          "field_a_term": "POMC → α-MSH (melanocortin)",
          "field_b_term": "satiety signal acting on MC3R/MC4R in hypothalamus",
          "note": "MC4R mutations cause monogenic obesity; α-MSH agonists (setmelanotide) are approved for MC4R-pathway obesity"
        },
        {
          "field_a_term": "GLP-1 receptor agonist (semaglutide)",
          "field_b_term": "pharmacological mimicry of post-meal satiety signal",
          "note": "Once-weekly semaglutide injection achieves 15–20% weight loss; dual GLP-1/GIP agonist (tirzepatide) achieves ~22%"
        },
        {
          "field_a_term": "oxytocin (PVN nonapeptide)",
          "field_b_term": "social bonding, trust, and pain modulation neuromodulator",
          "note": "Intranasal oxytocin increases trust in economic games; reduces social anxiety in autism trials (modest effect)"
        },
        {
          "field_a_term": "vasopressin V1aR receptor distribution (species-specific)",
          "field_b_term": "molecular basis of monogamous vs. promiscuous mating strategies in voles",
          "note": "Lim & Young: V1aR transfer to ventral forebrain of promiscuous meadow vole induces partner preference — receptor distribution, not peptide, determines behaviour"
        }
      ],
      "references": [
        {
          "doi": "10.1038/372425a0",
          "note": "Zhang et al. (1994) Nature 372:425 — cloning of leptin (ob gene)"
        },
        {
          "doi": "10.1016/j.cmet.2017.12.026",
          "note": "Drucker (2018) Cell Metab 27:740 — GLP-1 receptor agonists in diabetes and obesity"
        },
        {
          "doi": "10.1038/nature05009",
          "note": "Lim & Young (2006) Nature 443:595 — vasopressin receptor and pair bonding"
        },
        {
          "doi": "10.1038/nature01521",
          "note": "Batterham et al. (2002) Nature 418:650 — PYY3-36 reduces food intake via ARC"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-chemistry/b-neuropeptides-hypothalamic-control.yaml"
    },
    {
      "id": "b-neurotransmitter-pharmacology",
      "title": "Synaptic neurotransmission is governed by the physical chemistry of SNARE protein complex assembly (ΔG ≈ -65 kJ/mol), vesicle fusion kinetics, and receptor binding thermodynamics (K_D = k_off/k_on), providing a molecular pharmacological framework where all drug mechanisms — SSRIs, antipsychotics, benzodiazepines — reduce to modulation of specific binding equilibria.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Synaptic transmission is a sequence of precisely characterised physical chemistry steps. Vesicle docking/priming: SNARE complex formation between synaptobrevin (VAMP, v-SNARE on vesicle), syntaxin-1 and SNAP-25 (t-SNAREs on target membrane) releases ΔG ≈ -65 kJ/mol per complex — driving membrane fus",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-snare-zippering-energy-controls-vesicle-fusion-probability"
      ],
      "communication_gap": "Neuroscience and chemistry interact at the pharmacology interface but often superficially. Neuroscientists typically learn receptor pharmacology phenomenologically (dose-response curves, agonist/antagonist), while physical chemists and biochemists develop the thermodynamic/kinetic theory without knowing the neurobiological context. Drug discovery companies bridge these fields but proprietary research withholds mechanistic insights from academia. The Südhof Nobel Lecture (2013) is an exception — a neuroscientist presenting the full physical chemistry of vesicle fusion.\n",
      "translation_table": [
        {
          "field_a_term": "SNARE complex zippering free energy ΔG ≈ -65 kJ/mol",
          "field_b_term": "vesicle fusion driving force — thermodynamic engine of synaptic release",
          "note": "ΔG corresponds to ~26 k_BT at 37°C — sufficient to overcome membrane fusion barrier"
        },
        {
          "field_a_term": "dissociation constant K_D = k_off/k_on (receptor binding)",
          "field_b_term": "drug potency and effective concentration (IC₅₀ ≈ K_D for simple competitive inhibition)",
          "note": "K_D determines synaptic time constant through τ_unbinding = 1/k_off"
        },
        {
          "field_a_term": "allosteric modulator (positive/negative) of receptor complex",
          "field_b_term": "benzodiazepine mechanism — shifts GABA concentration-response curve",
          "note": "Allosteric vs. orthosteric binding is purely geometric chemistry; has distinct pharmacological signatures"
        },
        {
          "field_a_term": "Hill coefficient n in dose-response curve",
          "field_b_term": "cooperativity of receptor activation (n>1 = switch-like; n=1 = Michaelis-Menten)",
          "note": "Synaptotagmin Ca²⁺ cooperativity n≈5 creates the sharp threshold for vesicle release"
        },
        {
          "field_a_term": "reuptake transporter (SERT, DAT, NET) Km and Vmax",
          "field_b_term": "synaptic neurotransmitter clearance rate — determines signal duration",
          "note": "SSRIs elevate [5-HT] by blocking SERT; the spatial gradient of 5-HT depends on DAT Vmax"
        }
      ],
      "references": [
        {
          "note": "Südhof (2013) Neurotransmitter Release: The Last Millisecond in the Life of a Synaptic Vesicle. Nobel Lecture. https://www.nobelprize.org/prizes/medicine/2013/sudhof/lecture/"
        },
        {
          "note": "Katz (1966) Nerve, Muscle, and Synapse. McGraw-Hill"
        },
        {
          "note": "Nestler, Hyman & Malenka (2015) Molecular Neuropharmacology: A Foundation for Clinical Neuroscience. 3rd edn. McGraw-Hill"
        },
        {
          "note": "Stahl (2013) Stahl's Essential Psychopharmacology: Neuroscientific Basis and Practical Applications. 4th edn. Cambridge University Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-chemistry/b-neurotransmitter-pharmacology.yaml"
    },
    {
      "id": "b-neural-criticality-climate-tipping",
      "title": "Neural systems at criticality and climate systems near tipping points share identical mathematical signatures — diverging correlation length, critical slowing down (AR1 coefficient → 1), and power-law fluctuations — because both are governed by the same bifurcation theory of nonlinear dynamical systems.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Beggs & Plenz (2003) showed that cortical networks self-organize to a critical point where neuronal avalanche sizes follow a power law P(s) ~ s^{-3/2} — the mean-field branching process critical exponent. At this point, the network exhibits maximum dynamic range, information transmission, and suscep",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-neural-ew-indicators-climate-tipping-transfer"
      ],
      "communication_gap": "Neuroscientists studying criticality publish in Journal of Neuroscience, PLOS Computational Biology, and Nature Neuroscience. Climate tipping-point researchers publish in Nature, PNAS, and Nature Climate Change. Despite both literatures using identical mathematical formalisms (fold bifurcation, AR1 EWI, variance precursor, DFA exponent), there is essentially no citation overlap. The 2009 Scheffer Nature paper on tipping points never cites Beggs & Plenz (2003) despite discovering the same phenomenology. Workshops on critical transitions in complex systems occasionally bridge the gap, but most practitioners in each field are unaware of the methodological toolkit developed by the other.\n",
      "translation_table": [
        {
          "field_a_term": "Excitation/inhibition (E/I) balance in cortex",
          "field_b_term": "Net climate forcing vs. restoring feedback strength",
          "note": "Both are the bifurcation control parameter r in the fold normal form"
        },
        {
          "field_a_term": "Neuronal avalanche size distribution P(s) ~ s^{-3/2}",
          "field_b_term": "Precipitation extreme distribution / storm intensity power law",
          "note": "Mean-field critical exponent -3/2 appears in both when at criticality"
        },
        {
          "field_a_term": "Critical slowing down (recovery rate λ → 0 after perturbation)",
          "field_b_term": "AR1 early-warning indicator rising toward 1.0 before tipping",
          "note": "Both measure the leading eigenvalue of the linearized dynamics approaching zero"
        },
        {
          "field_a_term": "Branching ratio σ → 1 (sub-critical < 1, super-critical > 1)",
          "field_b_term": "Effective feedback gain approaching 1 near tipping point",
          "note": "σ = 1 is the critical point in both branching-process descriptions"
        },
        {
          "field_a_term": "Hopf bifurcation (oscillatory) at critical E/I ratio γ_c",
          "field_b_term": "Fold (saddle-node) bifurcation at climate tipping threshold",
          "note": "Neural criticality can be either Hopf or fold depending on circuit topology"
        },
        {
          "field_a_term": "Detrended fluctuation analysis (DFA) exponent ≈ 1 at criticality",
          "field_b_term": "1/f noise in paleoclimate proxies near past tipping events",
          "note": "DFA exponent = 1 indicates long-range correlations; both show this near transitions"
        }
      ],
      "references": [
        {
          "doi": "10.1523/JNEUROSCI.23-35-11167.2003",
          "note": "Beggs & Plenz (2003) J Neurosci — neuronal avalanches in cortex; power law P(s) ~ s^{-3/2}; foundational experimental demonstration of neural criticality\n"
        },
        {
          "doi": "10.1038/nature08227",
          "note": "Scheffer et al. (2009) Nature — \"Early-warning signals for critical transitions\"; AR1 and variance as universal precursors to tipping points across systems\n"
        },
        {
          "doi": "10.1073/pnas.0802430105",
          "note": "Dakos et al. (2008) PNAS — \"Slowing down as an early warning signal for abrupt climate change\"; empirical AR1 rise before 8 past climate transitions\n"
        },
        {
          "doi": "10.1371/journal.pcbi.1000402",
          "note": "Shew & Plenz (2013) PLOS Comput Biol — dynamic range, information capacity, and susceptibility are maximized at neural criticality\n"
        },
        {
          "doi": "10.1098/rstb.2012.0114",
          "note": "Lenton et al. (2012) Phil Trans R Soc B — framework and caveats for tipping-point early-warning indicators; statistical robustness tests\n"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/neuroscience-climate/b-neural-criticality-climate-tipping.yaml"
    },
    {
      "id": "b-cortical-hierarchy-predictive-coding",
      "title": "The hierarchical organisation of the cortex implements approximate Bayesian inference: higher areas send predictions (priors) downward and receive prediction errors (likelihood signals) upward, minimising free energy (surprise) in a generative model of sensory inputs — the predictive coding framework of Rao & Ballard (1999) and Friston's free energy principle.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Hierarchical Bayesian inference requires propagating predictions from high- level models downward and prediction errors from low-level observations upward. Rao & Ballard (1999) showed that a two-level cortical model where V1 predicts the output of the retina (top-down prediction) and sends residual ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-mismatch-negativity-bayesian-precision-prediction-error"
      ],
      "communication_gap": "The predictive coding framework bridges computational Bayesian inference (AI literature) and laminar cortical neurophysiology (neuroscience literature). Friston's free energy principle (published in Trends in Cognitive Sciences and Nature Reviews Neuroscience) has been critiqued as unfalsifiable; its specific laminar-circuit predictions are tested in a separate experimental literature that does not always engage with the formal Bayesian framework.\n",
      "translation_table": [
        {
          "field_a_term": "Top-down cortical connection (deep layers → lower areas)",
          "field_b_term": "Prior/prediction message in belief propagation",
          "note": "Deep layer projections carry generative model predictions; implement the prior P(z|θ)"
        },
        {
          "field_a_term": "Superficial layer prediction error neurons (mismatch response)",
          "field_b_term": "Likelihood/evidence message propagated up the hierarchy",
          "note": "Mismatch negativity and oddball responses = large prediction error from violated prior"
        },
        {
          "field_a_term": "Gain modulation by attention (increasing precision of prediction errors)",
          "field_b_term": "Precision weighting in Bayesian inference (inverse variance of likelihood)",
          "note": "Attention increases the precision (1/σ²) of sensory signals; equivalent to prior sharpening"
        },
        {
          "field_a_term": "Cortical hierarchy depth (V1→V2→V4→IT→PFC)",
          "field_b_term": "Depth of the Bayesian generative model hierarchy",
          "note": "Each cortical area = one level of the generative model; depth enables abstract representation"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nn.2387",
          "note": "Rao & Ballard (1999) Nature Neuroscience – predictive coding in the visual cortex; hierarchical model"
        },
        {
          "doi": "10.1098/rstb.2005.1622",
          "note": "Friston (2005) Phil. Trans. R. Soc. B – a theory of cortical responses; free energy principle"
        },
        {
          "doi": "10.1016/j.neuron.2015.09.030",
          "note": "Keller & Mrsic-Flogel (2018) Neuron – predictive processing: a canonical cortical computation"
        },
        {
          "doi": "10.1038/s41583-019-0275-5",
          "note": "Friston – a free energy principle for a particular physics; mathematical foundations"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-cognitive-science/b-cortical-hierarchy-predictive-coding.yaml"
    },
    {
      "id": "b-hippocampal-replay-sharp-wave-ripples",
      "title": "Hippocampal sharp-wave ripples (80-120 Hz oscillations during rest and slow-wave sleep) are the neural substrate of memory replay: compressed, time-reversed re-activation of awake experience sequences drives synaptic plasticity and memory consolidation in the neocortex\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "During rest and sleep, the hippocampus spontaneously reactivates waking experience sequences at 10-20× compressed timescale within 50-150 ms sharp-wave ripple events; this replay is bidirectional (forward and reverse) and drives spike-timing-dependent plasticity in hippocampal-cortical synapses, pro",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-sharp-wave-ripple-consolidation-reward-bias"
      ],
      "communication_gap": "Systems neuroscientists record hippocampal electrophysiology in rodents while cognitive scientists study human memory behaviorally; connecting rodent SWR physiology to human declarative memory consolidation requires bridging species, methods, and levels of analysis.\n",
      "translation_table": [
        {
          "field_a_term": "sharp-wave ripple event in CA1 (neuroscience)",
          "field_b_term": "compressed episodic memory reactivation trigger (cognitive science)",
          "note": "SWR events are the physiological substrate of the memory consolidation process posited in cognitive models"
        },
        {
          "field_a_term": "hippocampal place cell sequence reactivation (neuroscience)",
          "field_b_term": "episodic memory trace replay during offline consolidation (cognitive science)",
          "note": "Ordered place cell reactivation during SWRs maps onto the sequential structure of episodic memories"
        },
        {
          "field_a_term": "SWR-triggered cortical spindle coupling (neuroscience)",
          "field_b_term": "hippocampal-neocortical dialogue for systems consolidation (cognitive science)",
          "note": "SWR-spindle coupling transfers memory traces from hippocampus to distributed cortical storage"
        },
        {
          "field_a_term": "theta-gamma sequence during encoding (neuroscience)",
          "field_b_term": "working memory chunking and episodic encoding mechanism (cognitive science)",
          "note": "Theta-gamma nested oscillations organize spatial sequences that are later replayed during SWRs"
        }
      ],
      "references": [
        {
          "doi": "10.1038/s41586-019-1461-9",
          "note": "Joo & Frank (2018) - the hippocampal sharp wave-ripple in memory retrieval for immediate use and consolidation"
        },
        {
          "doi": "10.1038/nn.2307",
          "note": "Carr et al. (2011) - hippocampal replay in the awake state: a potential substrate for memory consolidation and retrieval"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-cognitive-science/b-hippocampal-replay-sharp-wave-ripples.yaml"
    },
    {
      "id": "b-backpropagation-synaptic-plasticity",
      "title": "The backpropagation algorithm (Rumelhart et al. 1986) computes error gradients by the chain rule propagated backward through a network, while biological synaptic plasticity implements credit assignment by mechanisms (feedback alignment, predictive coding) that may approximate or equal backprop without requiring the biologically implausible weight transport step.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Backpropagation (Rumelhart, Hinton & Williams 1986) is an efficient algorithm for computing gradients of a loss function with respect to all parameters in a multilayer neural network via the chain rule: δ_l = (W_{l+1}^T δ_{l+1}) ⊙ f'(z_l). This requires backward propagation of error signals through ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-predictive-coding-implements-backprop"
      ],
      "communication_gap": "The 1986 Rumelhart et al. Nature paper was written for AI researchers and did not engage with neuroscience. The biological plausibility literature grew in computational neuroscience journals (Neural Computation, PLOS Comput Biol) that deep learning engineers rarely read. The formal equivalence proofs (Sacramento 2018, Whittington 2019) are technically demanding and have not yet influenced experimental neuroscientists designing plasticity experiments.\n",
      "translation_table": [
        {
          "field_a_term": "backward error signal δ_l",
          "field_b_term": "prediction error signal in predictive coding (bottom-up residual)",
          "note": "top-down predictions generate residuals that propagate like backprop errors"
        },
        {
          "field_a_term": "weight transport (W^T in backward pass)",
          "field_b_term": "symmetric feedback connections (the biologically implausible step)",
          "note": "feedback alignment replaces W^T with random B; predictive coding uses separate pathways"
        },
        {
          "field_a_term": "learning rate × gradient (gradient descent step)",
          "field_b_term": "LTP/LTD magnitude determined by neuromodulator concentration"
        },
        {
          "field_a_term": "loss function L(output, target)",
          "field_b_term": "reward prediction error (dopaminergic signal)",
          "note": "RL with backprop maps onto actor-critic with dopaminergic credit assignment"
        },
        {
          "field_a_term": "multilayer network architecture",
          "field_b_term": "cortical hierarchy (V1 → V2 → V4 → IT in ventral visual stream)"
        },
        {
          "field_a_term": "gradient vanishing / exploding in deep networks",
          "field_b_term": "credit assignment problem across many synaptic layers in time"
        }
      ],
      "references": [
        {
          "doi": "10.1038/323533a0",
          "note": "Rumelhart et al. (1986) — Learning representations by back-propagating errors; Nature 323:533"
        },
        {
          "doi": "10.1038/ncomms13276",
          "note": "Lillicrap et al. (2016) — Random synaptic feedback weights support error backpropagation; Nat Commun 7:13276"
        },
        {
          "note": "Sacramento et al. (2018) — Dendritic cortical microcircuits approximate the backpropagation algorithm; NeurIPS"
        },
        {
          "doi": "10.1016/j.tics.2019.03.001",
          "note": "Whittington & Bogacz (2019) — Theories of error back-propagation in the brain; Trends Cogn Sci 23:235"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-computer-science/b-backpropagation-synaptic-plasticity.yaml"
    },
    {
      "id": "b-contrastive-predictive-coding-x-multiview-self-supervised-learning",
      "title": "Contrastive predictive coding objectives bridge predictive processing narratives in neuroscience with multiview self-supervised representation learning in machine learning.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Literature alignment at the objective level—CPC trains representations to predict latent summaries across temporal or view splits using contrastive classification; speculative analogy for biology—brains may implement nothing close to InfoNCE noise contrasts despite superficial predictive similarity.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-predictive-cpc-loss-improves-downstream-transfer-under-shift"
      ],
      "communication_gap": "ML contrasts assume minibatch negatives while neuroscience experiments emphasize continuous streams and plasticity rules without minibatches.",
      "translation_table": [
        {
          "field_a_term": "latent summary prediction",
          "field_b_term": "predictive latent inference / forward models",
          "note": "Neural implementations remain debated."
        },
        {
          "field_a_term": "negative samples / noise contrast",
          "field_b_term": "distractor statistics / normalization circuits",
          "note": "Mapping is metaphorical without circuit identification."
        },
        {
          "field_a_term": "mutual information lower bounds",
          "field_b_term": "sensory compression objectives",
          "note": "Variational bounds differ from measurable neural information."
        }
      ],
      "references": [
        {
          "arxiv": "1807.03748",
          "note": "Contrastive Predictive Coding (Oord et al.)."
        }
      ],
      "last_reviewed": "2026-05-09",
      "file": "cross-domain/neuroscience-computer-science/b-contrastive-predictive-coding-x-multiview-self-supervised-learning.yaml"
    },
    {
      "id": "b-efficient-coding-hypothesis-x-information-bottleneck-representation-learning",
      "title": "Efficient coding ideas in sensory neuroscience share optimization language with information-bottleneck objectives used to train compressed latent representations in machine learning.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Conceptual bridge (not a literal neural isomorphism): both traditions trade fidelity of retained information against complexity or redundancy constraints; speculative analogy for practice—IB-style objectives may approximate efficient-coding pressures only under explicit generative and noise models t",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-information-bottleneck-alignment-improves-neural-encoding-metrics"
      ],
      "communication_gap": "Fields disagree on whether mutual information estimates from finite neural recordings are comparable to variational IB bounds used in ML.",
      "translation_table": [
        {
          "field_a_term": "neural coding redundancy reduction",
          "field_b_term": "mutual-information bottleneck penalty",
          "note": "Analogous constraint shapes but different measurable quantities."
        },
        {
          "field_a_term": "metabolic cost / spike budget",
          "field_b_term": "rate-distortion / beta sweep parameter",
          "note": "Tradeoff knobs differ across substrates."
        },
        {
          "field_a_term": "natural scene statistics",
          "field_b_term": "training data distribution",
          "note": "Misspecified priors break naive transfers."
        }
      ],
      "references": [
        {
          "arxiv": "1612.00410",
          "note": "Deep variational information bottleneck framing commonly used in ML representation learning."
        }
      ],
      "last_reviewed": "2026-05-09",
      "file": "cross-domain/neuroscience-computer-science/b-efficient-coding-hypothesis-x-information-bottleneck-representation-learning.yaml"
    },
    {
      "id": "b-rl-intrinsic-motivation-x-novelty-information-gain-neuroscience",
      "title": "Reinforcement-learning intrinsic-motivation bonuses (count-based novelty, prediction-error curiosity, information-gain proxies) parallel neuroscience hypotheses that dopamine signals relate to expected future reward **and** reducible uncertainty — careful wording avoids claiming circuit-level isomorphism between TD-learning δ errors and midbrain dopamine in every paradigm.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Algorithmic intrinsic rewards encourage exploration by rewarding visits to rarely experienced states or large forward-model prediction errors; neuroscience proposes exploratory behaviors arise when agents seek stimuli that reduce uncertainty or improve internal models. Formal overlaps appear at the ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-count-novelty-scales-bayesian-information-gain-proxy"
      ],
      "communication_gap": "Deep RL papers optimize benchmarks while neuroscience emphasizes lesion studies and species differences — shared mathematics appears in reviews but rarely with aligned datasets across algorithm classes and neural recordings.\n",
      "translation_table": [
        {
          "field_a_term": "pseudocount / novelty bonus shaping reward R'(s,a)",
          "field_b_term": "novelty-driven exploratory modulation of choice probability",
          "note": "Phenomenological similarity — neural substrate mapping contested across labs."
        },
        {
          "field_a_term": "forward-model prediction error ‖φ(s') − f(s,a)‖",
          "field_b_term": "sensory prediction errors in hierarchical predictive coding theories",
          "note": "Useful modeling metaphor — scale and stationarity assumptions differ."
        },
        {
          "field_a_term": "empowerment / mutual-information maximization objectives",
          "field_b_term": "active inference “epistemic value” terms (expected precision-weighted information gain)",
          "note": "Shared Bayesian vocabulary with distinct algorithmic commitments."
        }
      ],
      "references": [
        {
          "arxiv": "1705.05363",
          "note": "Pathak et al. (2017) — curiosity-driven exploration via intrinsic prediction error in feature space (ICML preprint)."
        },
        {
          "doi": "10.1109/tamd.2010.2056368",
          "note": "Schmidhuber (2010) — formal theory of creativity, fun, and intrinsic motivation (IEEE Trans Autonomous Mental Development)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-computer-science/b-rl-intrinsic-motivation-x-novelty-information-gain-neuroscience.yaml"
    },
    {
      "id": "b-synaptic-tagging-x-cache-coherence-writeback-analogy",
      "title": "Synaptic tagging and capture lets a transient “tag” mark recently activated synapses so later protein-synthesis–dependent consolidation can selectively stabilize them — computer architects use cache coherence protocols (MESI-family) so transient writes can later propagate consistently across cores — **this bridge is an intentional pedagogical analogy**, not a claim of molecular isomorphism between neurons and silicon.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Both domains confront temporally separated events (weak tetanus vs protein synthesis arrival; write hits vs directory responses) that must reconcile local state with global consistency — tagging resembles marking a cache line dirty pending write-back; capture resembles satisfying coherence before ot",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-tag-decay-timescale-vs-write-buffer-lifetime-correlation-classroom-only"
      ],
      "communication_gap": "Neuroscientists rarely frame plasticity using cache coherence vocabulary; computer architects seldom study synaptic tagging despite parallel distributed-memory metaphors — deliberate analogy labeling prevents overclaiming hardware-neural identity.\n",
      "translation_table": [
        {
          "field_a_term": "Synaptic tag / eligibility trace at potentiated synapses",
          "field_b_term": "Dirty bit / transient pending-write metadata on a cache line",
          "note": "**Analogy only** — biology uses biochemistry; caches use directory bits."
        },
        {
          "field_a_term": "Late-phase plasticity / protein synthesis enabling consolidation",
          "field_b_term": "Write-back or invalidate completing global memory consistency",
          "note": "Shared temporal separation between fast marking and slower stabilization."
        },
        {
          "field_a_term": "Synapse-specific capture vs heterosynaptic spillover risks",
          "field_b_term": "False sharing / coherence invalidation blast radius",
          "note": "Both disciplines worry about unintended coupling between neighboring units."
        }
      ],
      "references": [
        {
          "doi": "10.1038/385533a0",
          "note": "Frey & Morris (1997) — synaptic tagging and long-term potentiation"
        },
        {
          "doi": "10.1145/78969.78972",
          "note": "Herlihy & Wing (1990) — linearizability as correctness condition for concurrent objects"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-computer-science/b-synaptic-tagging-x-cache-coherence-writeback-analogy.yaml"
    },
    {
      "id": "b-hysteresis-loop-area-x-neural-fatigue-recovery-dynamics",
      "title": "Hysteresis-loop area metrics can transfer from nonlinear control systems to neural fatigue-recovery tracking.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Hysteresis-loop area metrics can transfer from nonlinear control systems to neural fatigue-recovery tracking.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-hysteresis-loop-biomarkers-predict-neurofatigue-recovery-lag"
      ],
      "communication_gap": "Domain-specific vocabularies and benchmark conventions obscure transferable mathematical structure.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1017/S0962492910000061",
          "note": "Reviewed tools for nonlinear dynamical diagnostics and stability interpretation."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/neuroscience-control-theory/b-hysteresis-loop-area-x-neural-fatigue-recovery-dynamics.yaml"
    },
    {
      "id": "b-motor-control-internal-models",
      "title": "The brain implements forward and inverse internal models for motor control that are mathematically identical to the Kalman filter and Linear Quadratic Regulator (LQR) of control engineering; the cerebellum implements forward model prediction while the motor cortex implements inverse model control, bridging neuroscience and optimal control theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The brain implements internal models (forward and inverse models) for motor control. Forward model: given efference copy of motor command u, predict sensory outcome ŷ = f(u). Inverse model: given desired outcome y*, compute required command u* = f⁻¹(y*). The cerebellum implements forward models for ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-cerebellum-lqr-forward-model-implementation"
      ],
      "communication_gap": "Motor neuroscientists and control engineers work in separate literatures (Nature Neuroscience vs IEEE Transactions on Automatic Control) and rarely cross-cite. The language barrier is substantial: engineers use state-space notation while neuroscientists use biological terminology, obscuring mathematical equivalences.\n",
      "translation_table": [
        {
          "field_a_term": "efference copy (copy of motor command)",
          "field_b_term": "control input u in state-space model ẋ = Ax + Bu",
          "note": "efference copy is the biological u; used by forward model for prediction"
        },
        {
          "field_a_term": "forward model (cerebellum): ŷ = f(u)",
          "field_b_term": "Kalman filter prediction step: x̂_{k|k-1} = Ax̂_{k-1} + Bu_k",
          "note": "cerebellum implements one-step-ahead prediction of sensory state"
        },
        {
          "field_a_term": "inverse model (motor cortex + SMA): u* = f⁻¹(y*)",
          "field_b_term": "LQR control law: u* = -K x̂ (linear feedback of state estimate)",
          "note": "both compute control command from desired state; both are optimal under quadratic cost"
        },
        {
          "field_a_term": "climbing fiber error signal (inferior olive → cerebellum)",
          "field_b_term": "Kalman innovation: y_k - Cx̂_{k|k-1} (prediction error)",
          "note": "climbing fiber encodes mismatch between predicted and actual sensory outcome"
        },
        {
          "field_a_term": "Purkinje cell firing rate adaptation (LTD)",
          "field_b_term": "Kalman gain update: K_k = P_{k|k-1}C'(CP_{k|k-1}C'+R)^{-1}",
          "note": "cerebellar learning adjusts the internal model parameters analogous to Kalman gain"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.280.5368.1880",
          "note": "Wolpert et al. (1998) Science 280:1880 — cerebellar internal models"
        },
        {
          "doi": "10.1038/nn963",
          "note": "Todorov & Jordan (2002) Nat Neurosci 5:1226 — optimal feedback control of motor tasks"
        },
        {
          "doi": "10.1016/S0959-4388(99)00016-5",
          "note": "Kawato (1999) Curr Opin Neurobiol 9:718 — internal model hypothesis"
        },
        {
          "doi": "10.1523/JNEUROSCI.14-05-03208.1994",
          "note": "Shadmehr & Mussa-Ivaldi (1994) J Neurosci 14:3208 — learning dynamics of reaching"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-control-theory/b-motor-control-internal-models.yaml"
    },
    {
      "id": "b-neural-diversity-ecosystem-stability",
      "title": "Neural circuit diversity and ecosystem stability — May's random matrix stability criterion governs both heterogeneous neural populations and biodiverse food webs",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The diversity-stability relationship in ecology (May 1972) maps precisely onto neural circuit diversity: heterogeneous neural populations are more robust to perturbation than homogeneous ones, just as biodiverse ecosystems are more stable than monocultures. May's stability criterion: a random ecolog",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-neural-diversity-stability-random-matrix-prediction"
      ],
      "communication_gap": "Ecologists and neuroscientists rarely read each other's literature despite sharing stability mathematics. May (1972) was written for ecologists; Sompolinsky et al. (1988) derived the same stability criterion for neural networks without citing May. The connection was partly bridged by Allesina & Tang (2012) who updated May's analysis with structured random matrices. The neuroscience community has not widely adopted ecological stability metrics.\n",
      "translation_table": [
        {
          "field_a_term": "community matrix A_ij (species interaction)",
          "field_b_term": "synaptic weight matrix W_ij (neural connectivity)",
          "note": "Both are random matrices; eigenspectrum determines stability of fixed points"
        },
        {
          "field_a_term": "connectance C (fraction of realized interactions)",
          "field_b_term": "connectivity p (fraction of connected neuron pairs)",
          "note": "Both appear in the stability criterion: λ_max ~ σ√(SC) or σ√(pN)"
        },
        {
          "field_a_term": "biodiversity (species richness S)",
          "field_b_term": "neural diversity (cell type heterogeneity)",
          "note": "More diverse communities / populations can support more complex dynamics"
        },
        {
          "field_a_term": "predator-prey balance (top-down control)",
          "field_b_term": "inhibitory interneuron control (E/I balance)",
          "note": "Both prevent runaway growth; loss of predators → prey explosion = loss of inhibition → seizure"
        },
        {
          "field_a_term": "trophic cascade (keystone species removal)",
          "field_b_term": "disinhibition cascade (PV interneuron loss)",
          "note": "Remove a single control node → cascade instability in both food web and circuit"
        },
        {
          "field_a_term": "May's stability criterion σ√(SC) < 1",
          "field_b_term": "E/I balance criterion for cortical circuits",
          "note": "Both are conditions on interaction strength × connectivity for linear stability"
        }
      ],
      "references": [
        {
          "doi": "10.1038/238413a0",
          "note": "May (1972) Nature 238:413 — random matrix stability criterion for ecological communities"
        },
        {
          "doi": "10.1016/j.cell.2011.09.026",
          "note": "Isaacson & Scanziani (2011) Cell 147:1449 — E/I balance in cortical circuits"
        },
        {
          "doi": "10.1038/nn.3134",
          "note": "Litwin-Kumar & Doiron (2012) Nat Neurosci 15:1498 — slow fluctuations from heterogeneous networks"
        },
        {
          "doi": "10.1038/nature10832",
          "note": "Allesina & Tang (2012) Nature 483:205 — stability criteria for complex ecological networks"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-ecology/b-neural-diversity-ecosystem-stability.yaml"
    },
    {
      "id": "b-bci-signal-decoding",
      "title": "Brain-computer interfaces decode motor intentions from cortical population activity using linear decoders (Wiener filter) and Kalman state-space models — Fisher information in the neural population code sets the fundamental accuracy bound, connecting information theory to neural prosthetics engineering.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "BCIs decode intended movement from neural population activity recorded by electrode arrays. Linear decoding: ŷ = Wx + b where x ∈ R^N is the spike rate vector from N neurons, y is decoded kinematics (position, velocity), and W is found by least squares (Moore-Penrose pseudoinverse). The optimal line",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-neural-manifold-bci-decoder-generalization"
      ],
      "communication_gap": "Neural prosthetics engineering developed at the intersection of neuroscience and biomedical engineering. The fundamental information-theoretic formulation (Fisher information bound on decoding) is standard in statistics and signal processing but was applied to BCIs primarily by Shenoy's group at Stanford. Clinical neuroscientists developing BCI devices often implement Kalman filtering as a \"black box\" without connecting it to the Cramér-Rao information theory that explains why it is optimal. The channel capacity bound for BCIs remains underexplored relative to the rapidly advancing empirical decoding results.\n",
      "translation_table": [
        {
          "field_a_term": "Fisher information matrix I(θ) of neural population",
          "field_b_term": "fundamental decoding accuracy bound for any BCI algorithm"
        },
        {
          "field_a_term": "Cramér-Rao bound Var[θ̂] ≥ I^{-1}(θ)",
          "field_b_term": "minimum achievable decoding error in neural prosthetics"
        },
        {
          "field_a_term": "Wiener filter (optimal linear MMSE estimator)",
          "field_b_term": "linear decoder mapping spike rates to kinematic variables"
        },
        {
          "field_a_term": "Kalman filter (recursive Bayesian estimation)",
          "field_b_term": "continuous BCI decoder with temporal dynamics and noise model"
        },
        {
          "field_a_term": "signal subspace (PCA / factor analysis of neural activity)",
          "field_b_term": "low-dimensional neural manifold containing decoded movement information"
        },
        {
          "field_a_term": "channel capacity C = ½log₂(1 + SNR)",
          "field_b_term": "maximum BCI throughput in bits/second from a neural population"
        },
        {
          "field_a_term": "population vector P = Σ r_i c_i (preferred direction weighting)",
          "field_b_term": "Georgopoulos decoder — original motor cortex direction readout"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.3749885",
          "note": "Georgopoulos et al. (1986) Neuronal population coding of movement direction. Science 233:1416"
        },
        {
          "doi": "10.1016/j.neuron.2013.01.006",
          "note": "Shenoy et al. (2013) Cortical control of arm movements. Neuron 77:212"
        },
        {
          "doi": "10.1162/089976606775093882",
          "note": "Wu et al. (2006) Bayesian population decoding of motor cortical activity. Neural Comput 18:80"
        },
        {
          "doi": "10.1038/s41586-023-06377-x",
          "note": "Willett et al. (2023) A high-performance speech neuroprosthesis. Nature 620:1031"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-engineering/b-bci-signal-decoding.yaml"
    },
    {
      "id": "b-computational-psychiatry-digital-biomarkers",
      "title": "Computational psychiatry uses Bayesian brain models to explain psychosis (aberrant salience — excess dopamine random salience attribution), depression (reduced positive learning rate), and OCD (stuck prior updating), while smartphone digital biomarkers provide continuous ecological monitoring that replaces episodic clinical assessment.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Computational psychiatry applies mathematical models of brain computation to explain the mechanisms of psychiatric symptoms and guide treatment. The aberrant salience hypothesis (Kapur 2003): excess synaptic dopamine causes random salience attribution to irrelevant stimuli — delusions are the cognit",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-computational-psychiatry-aberrant-precision-antipsychotic-mechanism"
      ],
      "communication_gap": "Psychiatry is taught as a clinical discipline; computational neuroscience is a research speciality with little clinical integration. Engineers who build smartphone sensing systems and machine learning classifiers rarely collaborate with psychiatrists. The Bayesian brain framework is known to cognitive neuroscientists but has not entered psychiatric training or clinical practice. Regulatory agencies have not yet approved any digital biomarker for psychiatric use, limiting translation despite strong research evidence.\n",
      "translation_table": [
        {
          "field_a_term": "Bayesian precision (inverse variance of prior)",
          "field_b_term": "dopaminergic gain (D2 receptor occupancy modulates prediction error weighting)",
          "note": "Antipsychotics block D2 receptors — reducing aberrant salience by lowering the dopaminergic precision signal"
        },
        {
          "field_a_term": "learning rate α (temporal difference learning)",
          "field_b_term": "differential positive/negative reinforcement learning in depression",
          "note": "α⁺ < α⁻ in depression; ketamine restores α⁺ to normal within hours, predicting rapid antidepressant effect"
        },
        {
          "field_a_term": "ecological momentary assessment (EMA)",
          "field_b_term": "passive smartphone sensing (GPS, accelerometer, screen time, keystroke)",
          "note": "EMA captures real-world behaviour continuously vs. retrospective weekly clinic assessment"
        },
        {
          "field_a_term": "anomaly detection (signal processing)",
          "field_b_term": "digital biomarker early warning system for bipolar relapse",
          "note": "GPS radius of gyration drops days before manic episodes; accelerometer activity rises before mania, falls before depression"
        },
        {
          "field_a_term": "hierarchical Bayesian model",
          "field_b_term": "computational phenotyping — inferring latent psychiatric state from behavioural time series",
          "note": "Individual learning rate parameters estimated from behavioural tasks form the computational phenotype"
        }
      ],
      "references": [
        {
          "doi": "10.1176/appi.ajp.160.1.13",
          "note": "Kapur (2003) Am J Psychiatry 160:13 — aberrant salience hypothesis of psychosis; dopamine and delusion formation"
        },
        {
          "doi": "10.1093/schbul/sbv190",
          "note": "Adams et al. (2016) Schizophr Bull 42:971 — computational psychiatry of psychosis; precision-weighting model"
        },
        {
          "doi": "10.1371/journal.pcbi.1004695",
          "note": "Huys et al. (2016) PLoS Comput Biol 12:e1004695 — computational psychiatry: the brain as a hierarchical generative model"
        },
        {
          "doi": "10.1002/wps.20508",
          "note": "Torous et al. (2018) World Psychiatry 17:241 — smartphone digital biomarkers for mental health monitoring"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-engineering/b-computational-psychiatry-digital-biomarkers.yaml"
    },
    {
      "id": "b-insect-navigation-path-integration",
      "title": "Insect path integration (dead reckoning) is a vector-based Kalman filter: the central complex accumulates velocity and angular signals to maintain a home-vector estimate that degrades with noise exactly as predicted by random-walk error accumulation",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Desert ants (Cataglyphis) and honeybees maintain a home vector H=(r,θ) pointing back to the nest throughout a foraging excursion. The vector is updated by integrating velocity (from optic flow) and heading (from polarized light compass). Mathematically: H_{t+1} = H_t + Δx_t where Δx_t has noise ε_t.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-insect-navigation-path-integration"
      ],
      "communication_gap": "Neuroethologists who study insect navigation analyze home vector errors and ring attractor dynamics without connecting to the robotics/control literature on dead reckoning and Kalman filtering. Roboticists who design dead-reckoning systems are unaware that evolution has solved the same problem in a few hundred neurons (Drosophila CX: ~500 cells), potentially offering algorithmic insights for ultra-efficient autonomous navigation.\n",
      "translation_table": [
        {
          "field_a_term": "ant home vector H_t (direction and distance to nest)",
          "field_b_term": "state estimate x̂_t in the dead-reckoning Kalman filter",
          "note": "Path integrator maintains estimate of current position relative to nest"
        },
        {
          "field_a_term": "central complex ring attractor heading bump",
          "field_b_term": "continuous attractor neural network encoding angular state variable",
          "note": "Bump position encodes heading; bump dynamics implement angular integration"
        },
        {
          "field_a_term": "optic flow signal (velocity estimate)",
          "field_b_term": "control input u_t corrupted by measurement noise in Kalman model",
          "note": "Optic flow gives noisy speed estimate; heading from polarized sky compass"
        },
        {
          "field_a_term": "home vector error σ·√L (grows with path length L)",
          "field_b_term": "random walk standard deviation from IID noise accumulation",
          "note": "Psychophysical experiments in ants confirm the √L error scaling"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.85.14.5287",
          "note": "Müller & Wehner (1988) Path integration in desert ants, Cataglyphis fortis. PNAS 85:5287"
        },
        {
          "doi": "10.1016/j.cub.2017.08.052",
          "note": "Stone et al. (2017) An anatomically constrained model for path integration in the bee brain. Curr Biol 27:3069"
        },
        {
          "doi": "10.1126/science.aan9930",
          "note": "Kim et al. (2017) Ring attractor dynamics in the Drosophila central brain. Science 356:849"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-engineering/b-insect-navigation-path-integration.yaml"
    },
    {
      "id": "b-kalman-filter-x-brain-state-estimation",
      "title": "Kalman filtering — recursive Bayesian state estimation for linear-Gaussian dynamics — maps onto neural circuits that combine a forward prediction with a sensory correction, motivating tractable experimental tests in perception and motor control.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Kalman filter alternates prediction using a dynamics model with an innovation update weighted by the Kalman gain, minimizing mean-squared estimation error under Gaussian assumptions. Canonical neural models of perception and sensorimotor integration (e.g., optimal feedback control sketches, pred",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-sensory-cortex-implements-approximate-kalman-updates"
      ],
      "communication_gap": "Control and robotics textbooks present Kalman filters as standard; systems neuroscience often uses related but differently named linear dynamical models without linking gains to Q/R ratios.",
      "translation_table": [
        {
          "field_a_term": "state vector x_t",
          "field_b_term": "latent stimulus or body state represented in cortex"
        },
        {
          "field_a_term": "process noise covariance Q",
          "field_b_term": "internal variability / drift in neural dynamics"
        },
        {
          "field_a_term": "measurement noise covariance R",
          "field_b_term": "sensory noise and ambiguity"
        },
        {
          "field_a_term": "Kalman gain K",
          "field_b_term": "effective feedforward sensory weighting"
        }
      ],
      "references": [
        {
          "doi": "10.1115/1.3662552",
          "note": "Kalman (1960) — discrete-time linear filtering"
        },
        {
          "doi": "10.1038/nn.2733",
          "note": "Representative discussion of Bayesian cue integration in perception (example bridge paper)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-engineering/b-kalman-filter-x-brain-state-estimation.yaml"
    },
    {
      "id": "b-leaky-if-neuron-x-rc-membrane-circuit",
      "title": "The leaky integrate-and-fire (LIF) subthreshold equation τ_m dV/dt = −(V − V_rest) + R I(t) is the same first-order linear ODE as charging a parallel RC circuit driven by current — capacitance stores charge while leak conductance provides dissipation — establishing direct electrophysiological–circuit metaphors used in neuromorphic engineering datasheets.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Cell membrane lipid bilayer acts as capacitance C_m per area; ion channels provide conductances g giving τ_m = C_m/g. Subthreshold LIF ignores spike-generation nonlinearities but preserves low-pass filtering: voltage tracks low-frequency input currents while attenuating high-frequency noise — identi",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-leaky-if-neuron-x-rc-membrane-circuit"
      ],
      "communication_gap": "Neuroscience curricula sometimes teach LIF without explicit circuit diagrams except in neuromorphic tracks; electrical engineering students rarely connect RC labs to Hodgkin–Huxley reductions unless taking computational neuroscience electives.\n",
      "translation_table": [
        {
          "field_a_term": "Membrane time constant τ_m = C_m / g_leak",
          "field_b_term": "RC time constant τ = R C in equivalent circuit",
          "note": "Same exponential relaxation to steady state under step currents."
        },
        {
          "field_a_term": "Input current I injected at soma or dendrite compartment",
          "field_b_term": "Current source driving RC node voltage",
          "note": "Linear superposition holds subthreshold for both."
        },
        {
          "field_a_term": "Spike threshold nonlinearity (fire-and-reset)",
          "field_b_term": "Comparator / Schmitt trigger in silicon neuron circuits",
          "note": "Adds digital event output absent from passive RC but downstream digital logic restores hybrid modeling path."
        }
      ],
      "references": [
        {
          "doi": "10.1017/CBO9780511815706",
          "note": "Gerstner & Kistler — Spiking Neuron Models (RC integrate-and-fire derivation)"
        },
        {
          "doi": "10.1038/nn0603-533",
          "note": "Meffin et al. (2003) — neuromorphic silicon neuron contextual citation cluster"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-engineering/b-leaky-if-neuron-x-rc-membrane-circuit.yaml"
    },
    {
      "id": "b-neural-control-theory",
      "title": "Biological motor control implements the same optimal stochastic control theory principles used in engineered controllers — minimising jerk or endpoint variance, Kalman filtering in the cerebellum, and efference-copy forward models — demonstrating that the nervous system is an optimal controller operating under signal-dependent noise.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Flash & Hogan (1985, J Neurosci 5:1688) showed that human arm trajectories minimise the third derivative of position (jerk), generating smooth bell-shaped velocity profiles characteristic of minimum-jerk control. This was the first rigorous demonstration that biological motor control obeys an engine",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-cerebellum-kalman-prediction-error"
      ],
      "communication_gap": "Control engineers publishing in IEEE Transactions on Automatic Control and computational neuroscientists publishing in Neuron and J Neurosci operate in largely separate communities. The Wolpert group (Cambridge, now Columbia) and Todorov (now at Google DeepMind) bridge these communities, but most motor neuroscience papers do not cite the control theory literature, and most control engineers are unaware of the cerebellar Kalman filter evidence. Robotics is increasingly aware of biological motor control, but the flow is primarily from neuroscience to robotics, not bidirectional.\n",
      "translation_table": [
        {
          "field_a_term": "Kalman filter state estimate x̂ (control engineering)",
          "field_b_term": "cerebellar internal model of limb state"
        },
        {
          "field_a_term": "Kalman gain K (optimal weighting of prediction vs measurement)",
          "field_b_term": "sensorimotor integration weights (proprioception vs forward model)"
        },
        {
          "field_a_term": "process noise covariance Q",
          "field_b_term": "signal-dependent motor noise (σ ∝ u)"
        },
        {
          "field_a_term": "innovation / prediction error (control theory)",
          "field_b_term": "climbing fibre error signal to Purkinje cells"
        },
        {
          "field_a_term": "efference copy (neuroscience)",
          "field_b_term": "forward model prediction u → x̂ (control engineering)"
        },
        {
          "field_a_term": "mixture-of-experts controller (engineering)",
          "field_b_term": "MOSAIC architecture of paired cerebellar modules"
        },
        {
          "field_a_term": "minimum jerk / minimum variance cost function",
          "field_b_term": "smooth bell-shaped velocity profiles of human arm movements"
        }
      ],
      "references": [
        {
          "doi": "10.1523/JNEUROSCI.05-07-01688.1985",
          "note": "Flash & Hogan (1985) The coordination of arm movements — minimum jerk model, J Neurosci 5:1688"
        },
        {
          "doi": "10.1016/S0893-6080(98)00066-5",
          "note": "Wolpert & Kawato (1998) Multiple paired forward and inverse models for motor control, Neural Networks 11:1317"
        },
        {
          "doi": "10.1038/nn963",
          "note": "Todorov & Jordan (2002) Optimal feedback control as a theory of motor coordination, Nat Neurosci 5:1226"
        },
        {
          "doi": "10.1115/1.3662552",
          "note": "Kalman (1960) A new approach to linear filtering and prediction problems, J Basic Eng 82:35"
        },
        {
          "doi": "10.1038/nature11129",
          "note": "Churchland et al. (2012) Neural population dynamics during reaching, Nature 487:51"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/neuroscience-engineering/b-neural-control-theory.yaml"
    },
    {
      "id": "b-neuroprosthetics-adaptive-control",
      "title": "Neuroprosthetics closes the sensorimotor loop by decoding motor intention from neural populations via Kalman-filter and RNN decoders, delivering intracortical microstimulation sensory feedback, and using online adaptive algorithms to compensate neural drift — the Cramér-Rao bound on Fisher information in the neural code sets the fundamental decoding limit bridging neuroscience and control theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Neuroprosthetics is the engineering discipline of closing the sensorimotor loop with a brain-machine interface — decoding neural signals as control commands for prosthetic limbs and feeding sensory information back to the brain. It requires integration of neural coding theory, optimal estimation the",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-manifold-hypothesis-m1-latent-dynamics-decoder-generalisation"
      ],
      "communication_gap": "Neuroprosthetics research spans neuroscience (neural coding, plasticity), engineering (signal processing, control theory, device design), and clinical medicine (neurosurgery, rehabilitation). The academic literature is split across Nature Neuroscience, IEEE Transactions on Neural Systems, and Lancet. Neural coding theory (Fisher information, population vector decoding) was developed in computational neuroscience largely independently of the control engineering tradition (Kalman filtering, adaptive estimation). BrainGate collaborations are unusual in combining these communities. Adaptive decoding methods that are standard in control engineering (recursive least squares, online EM) have only recently been applied to neural decoders, despite being optimal. The Cramér-Rao bound analysis of the neural code (Seung & Sompolinsky 1993) is well-known in computational neuroscience but rarely cited in BCI engineering papers.\n",
      "translation_table": [
        {
          "field_a_term": "Kalman filter (optimal linear state estimator)",
          "field_b_term": "neural decoder (motor intention → prosthetic command)",
          "note": "exact Bayesian posterior for linear-Gaussian model; dominant BCI decoder architecture"
        },
        {
          "field_a_term": "state-space model (x_{t+1} = Ax_t + w_t, y_t = Cx_t + q_t)",
          "field_b_term": "kinematic state (position/velocity) predicted from firing rate observations",
          "note": "C matrix is the neural tuning curve matrix; estimated during calibration session"
        },
        {
          "field_a_term": "intracortical microstimulation (ICMS)",
          "field_b_term": "artificial tactile sensation (closing the sensory feedback loop)",
          "note": "current pulse trains in S1 evoke localised percepts; frequency/amplitude encode intensity"
        },
        {
          "field_a_term": "online EM algorithm (adaptive decoder parameter update)",
          "field_b_term": "Bayesian recursive estimation for non-stationary neural tuning",
          "note": "same algorithm as online Kalman filter identification; prevents performance degradation"
        },
        {
          "field_a_term": "Fisher information J(θ) in neural population",
          "field_b_term": "Cramér-Rao bound on decoding accuracy",
          "note": "fundamental limit independent of decoder choice; set by neural code properties"
        },
        {
          "field_a_term": "LSTM recurrent neural network decoder",
          "field_b_term": "sequence model capturing trajectory dynamics in M1 population",
          "note": "captures non-Markovian dynamics and coarticulation that linear Kalman filter misses"
        },
        {
          "field_a_term": "electrode array impedance increase (glial encapsulation)",
          "field_b_term": "non-stationarity requiring adaptive decoder update",
          "note": "biotic tissue response (neuroscience) → engineering system drift → adaptive control need"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature11076",
          "note": "Hochberg et al. (2012) Nature 485:372 — BrainGate2 robotic arm control by tetraplegic patients"
        },
        {
          "doi": "10.1016/S0140-6736(12)61816-9",
          "note": "Collinger et al. (2013) Lancet 381:557 — high-performance neuroprosthetic arm"
        },
        {
          "doi": "10.1126/scitranslmed.aaf8083",
          "note": "Flesher et al. (2016) Sci Transl Med 8:361ra141 — ICMS sensory feedback for object discrimination"
        },
        {
          "doi": "10.1038/s41586-023-06139-4",
          "note": "Willett et al. (2023) Nature 620:1031 — 62 words/minute speech BCI via neural manifold decoding"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-engineering/b-neuroprosthetics-adaptive-control.yaml"
    },
    {
      "id": "b-spike-coding-neuromorphic",
      "title": "Biological neurons communicate via discrete action potentials (spikes) at ~10 fJ/spike; neuromorphic chips (Intel Loihi, IBM TrueNorth) implement spiking neural networks in silicon at 3–4 orders of magnitude lower energy than GPU inference, bridging computational neuroscience to ultra-low-power AI hardware.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Biological neural computation uses action potentials (spikes): discrete, all-or-nothing pulses of ~100 mV amplitude and ~1 ms duration. Neurons transmit information via:\n1. RATE CODING: firing rate r(t) encodes stimulus intensity (Adrian 1926).\n   Information = ∫ r log r dt — the rate-coded Fisher i",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-neuromorphic-chips-edge-ai-energy-advantage"
      ],
      "communication_gap": "Computational neuroscientists who study spike coding rarely have the hardware design background to implement their models in silicon; chip designers rarely read neuroscience literature. Intel's Loihi team and IBM's TrueNorth team did bridge this gap, but most neuroscience spike coding research is never translated to hardware. Conversely, neuromorphic engineers often use simplified spiking neuron models that omit the dendritic, multi-compartment dynamics that are central to biological neural computation.\n",
      "translation_table": [
        {
          "field_a_term": "action potential (spike)",
          "field_b_term": "digital event in neuromorphic hardware",
          "note": "Both are discrete, stereotyped events; information is in timing/rate, not amplitude"
        },
        {
          "field_a_term": "firing rate r(t) [Hz]",
          "field_b_term": "spike count per time window in hardware",
          "note": "Rate coding maps to a simple histogram; temporal coding maps to precise timestamp registers"
        },
        {
          "field_a_term": "synaptic weight w_ij",
          "field_b_term": "configurable crossbar weight in neuromorphic SRAM",
          "note": "Biological synapse strength ↔ hardware weight register; STDP updates both"
        },
        {
          "field_a_term": "STDP learning rule",
          "field_b_term": "on-chip local learning in Loihi/TrueNorth",
          "note": "STDP is local (uses only pre- and post-synaptic spike times), making it hardware-implementable"
        },
        {
          "field_a_term": "neural population code",
          "field_b_term": "spike pattern across neuromorphic core array",
          "note": "Population coding spreads information across many neurons; maps to parallel core arrays"
        }
      ],
      "references": [
        {
          "doi": "10.1038/354515a0",
          "note": "Mahowald, M. & Douglas, R. (1991). A silicon neuron. Nature 354:515–518. — first neuromorphic silicon neuron"
        },
        {
          "doi": "10.1126/science.1254642",
          "note": "Merolla, P.A. et al. (2014). A million spiking-neuron integrated circuit with a scalable communication network. Science 345:668–673. (IBM TrueNorth)"
        },
        {
          "doi": "10.1109/MM.2018.112130359",
          "note": "Davies, M. et al. (2018). Loihi: A neuromorphic manycore processor with on-chip learning. IEEE Micro 38:82–99."
        },
        {
          "doi": "10.3389/fnins.2018.00774",
          "note": "Pfeiffer, M. & Pfeil, T. (2018). Deep learning with spiking neurons: Opportunities and challenges. Front. Neurosci. 12:774."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-engineering/b-spike-coding-neuromorphic.yaml"
    },
    {
      "id": "b-glymphatic-cerebrospinal-fluid",
      "title": "The glymphatic system uses perivascular cerebrospinal fluid flow driven by arterial pulsatility and aquaporin-4 water channels to clear amyloid-β and tau from the brain — a fluid dynamics problem with direct Alzheimer's disease implications.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The glymphatic system (Iliff et al. 2012) uses cerebrospinal fluid (CSF) flow along perivascular spaces (the Virchow-Robin spaces surrounding cerebral arteries) to clear metabolic waste products — including amyloid-β and tau, the aggregating proteins implicated in Alzheimer's disease — from brain pa",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-glymphatic-dysfunction-drives-amyloid-accumulation"
      ],
      "communication_gap": "Neuroscientists studying Alzheimer's disease rarely have fluid mechanics training; fluid dynamicists rarely study brain physiology. The glymphatic system is a rapidly growing field (first described 2012) with an underserved need for quantitative fluid mechanics modeling to translate cellular observations to tissue-scale clearance rates.\n",
      "translation_table": [
        {
          "field_a_term": "perivascular space (Virchow-Robin space)",
          "field_b_term": "porous channel geometry for CSF flow",
          "note": "Annular space between vessel wall and pia mater acts as CSF conduit"
        },
        {
          "field_a_term": "arterial pulsatility (cardiac cycle pressure waves)",
          "field_b_term": "driving pressure for perivascular CSF flow",
          "note": "Reduced pulsatility (aging, hypertension) impairs glymphatic function"
        },
        {
          "field_a_term": "aquaporin-4 (AQP4) water channels on astrocyte endfeet",
          "field_b_term": "hydraulic conductivity of brain parenchyma",
          "note": "AQP4 knockout mice show 70% reduction in interstitial solute clearance"
        },
        {
          "field_a_term": "extracellular space volume fraction (αe)",
          "field_b_term": "Darcy hydraulic conductivity K ∝ αe^3 (Kozeny-Carman)",
          "note": "Sleep-induced αe increase from 14% to 23% enhances K by ~3×"
        },
        {
          "field_a_term": "amyloid-β clearance rate (neuroscience)",
          "field_b_term": "solute advection rate by bulk interstitial flow (fluid dynamics)",
          "note": "Diffusion alone is too slow; bulk flow required for efficient clearance"
        }
      ],
      "references": [
        {
          "note": "Iliff et al. (2012) A paravascular pathway facilitates CSF flow through the brain parenchyma. Sci Transl Med 4:147ra111"
        },
        {
          "note": "Xie et al. (2013) Sleep drives metabolite clearance from the adult brain. Science 342:373-377"
        },
        {
          "note": "Nedergaard (2013) Garbage truck of the brain. Science 340:1529-1530"
        },
        {
          "note": "Mestre et al. (2018) Flow of cerebrospinal fluid is driven by arterial pulsations and is reduced in hypertension. Nat Commun 9:4878"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-fluid-dynamics/b-glymphatic-cerebrospinal-fluid.yaml"
    },
    {
      "id": "b-neurolyme-neuroinflammation",
      "title": "Lyme neuroborreliosis links blood-brain barrier biology (neuroscience) to TLR-mediated cytokine signaling (immunology) through a BBB-crossing and neuroinflammation cascade that can become self-sustaining after bacterial clearance.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Lyme neuroborreliosis (LNB) requires understanding at two levels that belong to different research communities. Neuroscience side: Borrelia crosses the blood-brain barrier (BBB) via a Trojan-horse mechanism — infecting perivascular macrophages or transcytosing directly through brain endothelial cell",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ptlds-neuroinflammation-il6-blockade"
      ],
      "communication_gap": "Lyme neuroborreliosis is primarily managed by infectious disease specialists and neurologists who focus on antibiotic treatment. Neuroimmunologists who study self-sustaining neuroinflammation (from MS, TBI, and neurodegeneration research) rarely attend Lyme disease conferences. The concept of post-infectious self-sustaining astrogliosis is well-established in the neurodegeneration literature (Sofroniew 2015) but has not been formally tested in PTLDS. This communication gap may be directly responsible for the lack of anti-inflammatory treatment trials in PTLDS over the past 20 years.\n",
      "translation_table": [
        {
          "field_a_term": "BBB tight-junction permeability increase (neuroscience)",
          "field_b_term": "CNS immune surveillance access (immunology)",
          "note": "Increased BBB permeability enables CNS infiltration by peripheral immune cells, amplifying neuroinflammation"
        },
        {
          "field_a_term": "Microglial M1 activation state (neuroscience)",
          "field_b_term": "TLR2/NFκB-driven pro-inflammatory cytokine production (immunology)",
          "note": "Microglial activation is the cellular substrate; TLR2/NFκB is the molecular mechanism — rarely described together"
        },
        {
          "field_a_term": "Reactive astrogliosis (neuroscience)",
          "field_b_term": "IL-6/STAT3-driven glial scar formation (immunology)",
          "note": "Astrogliosis is driven by IL-6 signaling (JAK/STAT3); blocking IL-6R is testable as therapy"
        },
        {
          "field_a_term": "Hippocampal neurogenesis suppression (neuroscience)",
          "field_b_term": "TNF-α-mediated neural progenitor inhibition (immunology)",
          "note": "TNF-α directly suppresses hippocampal neural progenitor proliferation — links cytokine excess to cognitive symptoms"
        },
        {
          "field_a_term": "CXCL13 in CSF (clinical neurology biomarker)",
          "field_b_term": "B-cell chemoattractant / intrathecal B-cell response marker (immunology)",
          "note": "CXCL13 bridges the clinical neurology and immunology communities as the most diagnostically useful LNB biomarker"
        }
      ],
      "references": [
        {
          "doi": "10.1186/s12974-015-0312-5",
          "note": "Ramesh et al. (2015) Inhibition of TLR2 and TLR4 signaling reduces microglial and astrocyte activation and Borrelia burgdorferi-induced CNS inflammation. J Neuroinflammation 12:102."
        },
        {
          "doi": "10.1212/WNL.0b013e31816c8a43",
          "note": "Fallon et al. (2008) A randomized, placebo-controlled trial of repeated IV antibiotic therapy for Lyme encephalopathy. Neurology 70:992–1003."
        },
        {
          "doi": "10.2353/ajpath.2009.080955",
          "note": "Bernardino et al. (2009) Infection-induced and antibody-mediated outer surface protein C seroconversion in Borrelia burgdorferi-infected mice. Am J Pathol 174:1087–1096."
        },
        {
          "doi": "10.3389/fmed.2013.00057",
          "note": "Aucott et al. (2013) Post-treatment Lyme disease syndrome symptomatology and the impact on life functioning. Front Med 7:57."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-immunology/b-neurolyme-neuroinflammation.yaml"
    },
    {
      "id": "b-neural-coding-channel-capacity",
      "title": "Sensory neurons as Shannon information channels — efficient coding and neural channel capacity",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The nervous system encodes stimuli as spike trains — discrete all-or-none action potentials — which can be analysed as Shannon communication channels. The channel capacity C = B log₂(1 + S/N) bounds the mutual information between stimulus and neural response, where B is the bandwidth (maximum firing",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-cortical-hierarchies-as-predictive-coding-stack"
      ],
      "communication_gap": "Barlow's 1961 proposal was in a book chapter (Sensory Communication) largely unread outside neuroscience. Information theory was being developed by engineers at Bell Labs simultaneously with no awareness of Barlow's parallel work. The mathematical connection was made explicit only in the 1990s with direct measurements by Strong, Fairhall and colleagues. Today computational neuroscience routinely uses information theory but most neuroscientists are not trained in Shannon theory, and most information theorists are unaware of the neural coding literature.\n",
      "translation_table": [
        {
          "field_a_term": "Shannon channel capacity C = B log₂(1+S/N)",
          "field_b_term": "maximum information rate of a single neuron (~3 bits/spike)",
          "note": "Measured directly from spike train statistics; typical values 1-4 bits/spike"
        },
        {
          "field_a_term": "channel bandwidth B",
          "field_b_term": "maximum neural firing rate (~1 kHz, limited by refractory period)",
          "note": "The refractory period (~1 ms) is the physiological bandwidth constraint"
        },
        {
          "field_a_term": "channel noise",
          "field_b_term": "trial-to-trial variability (Fano factor, spike jitter)",
          "note": "Poisson-like noise in spike generation; limits information transmission"
        },
        {
          "field_a_term": "source entropy H(stimulus)",
          "field_b_term": "natural image / sound statistics",
          "note": "Efficient coding hypothesis: neurons adapt to the statistics of their natural stimulus environment"
        },
        {
          "field_a_term": "data compression / redundancy reduction",
          "field_b_term": "decorrelation / whitening in early visual system",
          "note": "Lateral inhibition removes spatial correlations; center-surround receptive fields implement whitening"
        },
        {
          "field_a_term": "mutual information I(X;Y)",
          "field_b_term": "neural information transfer (bits/s measured via direct method)",
          "note": "Strong et al. (1998) first measured this directly in H1 neuron of the fly"
        }
      ],
      "references": [
        {
          "note": "Barlow (1961) Possible principles underlying the transformation of sensory messages. In Sensory Communication (MIT Press)",
          "url": "https://mitpress.mit.edu/9780262691840/"
        },
        {
          "note": "Shannon (1948) A Mathematical Theory of Communication. Bell Syst Tech J 27:379",
          "doi": "10.1002/j.1538-7305.1948.tb01338.x"
        },
        {
          "doi": "10.1103/PhysRevLett.80.197",
          "note": "Strong et al. (1998) Phys Rev Lett 80:197 — direct measurement of neural information transmission"
        },
        {
          "doi": "10.1038/35024638",
          "note": "Fairhall et al. (2001) Nature 412:787 — adaptation maximises information near channel capacity"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-information-theory/b-neural-coding-channel-capacity.yaml"
    },
    {
      "id": "b-openalex-info-theory-intrinsic-motivation",
      "title": "Intrinsic motivation and autonomy as defined in self-determination theory are operationalisable as information-theoretic quantities — specifically, empowerment (the maximum mutual information between an agent's actions and their future states) and free-energy minimization — providing a neurocomputational mechanism for why autonomy need satisfaction predicts psychological well-being.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Ryan and Deci (2000, 27 k citations) established that intrinsic motivation, competence, and autonomy are fundamental psychological needs whose satisfaction predicts well-being. Information theory and computational neuroscience provide a mechanistic substrate: intrinsic motivation corresponds to the ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-autonomy-need-empowerment-maximization"
      ],
      "communication_gap": "Psychologists and SDT researchers rarely engage with the information-theoretic literature; computational neuroscientists working on free energy and empowerment rarely read Journal of Personality and Social Psychology. The SDT constructs are phenomenologically rich but lack mechanistic formulation; the information-theoretic constructs are precise but lack grounding in human motivational phenomenology.\n",
      "translation_table": [
        {
          "field_a_term": "channel capacity I(A; S')",
          "field_b_term": "empowerment (measure of intrinsic motivation)",
          "note": "Maximum mutual information between actions and reachable future states"
        },
        {
          "field_a_term": "surprisal H[S | model] (free energy)",
          "field_b_term": "competence need frustration (gap between actual and expected performance)",
          "note": "Minimising surprisal = satisfying competence need by improving the predictive model"
        },
        {
          "field_a_term": "entropy H[A] of action distribution",
          "field_b_term": "degree of autonomy (self-determination vs. external control)",
          "note": "High action entropy = uninhibited self-direction; low entropy = controlled, scripted behavior"
        },
        {
          "field_a_term": "mutual information I(A; Reward)",
          "field_b_term": "extrinsic motivation signal",
          "note": "When actions become instrumental for external reward, entropy collapses — the crowding-out effect"
        },
        {
          "field_a_term": "information bottleneck representation",
          "field_b_term": "competence schema (compressed world model supporting efficient action)",
          "note": "Intrinsically motivated agents compress observations into maximally predictive representations"
        }
      ],
      "references": [
        {
          "doi": "10.1037//0003-066x.55.1.68",
          "note": "Ryan & Deci (2000) Self-determination theory — 27,411 citations; primary reference"
        },
        {
          "doi": "10.1016/j.neuron.2013.09.038",
          "note": "Friston et al. (2012) — active inference and the free energy principle"
        },
        {
          "doi": "10.48550/arxiv.0710.4919",
          "note": "Klyubin et al. (2008) — empowerment as a universal agent-centric measure of control"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-information-theory/b-openalex-info-theory-intrinsic-motivation.yaml"
    },
    {
      "id": "b-predictive-coding-grammar",
      "title": "Friston's free-energy / predictive coding framework for hierarchical neural inference is mathematically equivalent to probabilistic hierarchical phrase structure grammar: prediction error in neural processing equals surprisal in syntactic processing, and precision-weighting equals attention over syntactic dependencies.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Friston's free-energy principle (2010) proposes that the brain is a hierarchical generative model that minimizes variational free energy F = KL[q(h)||p(h|s)] ≈ complexity - accuracy. At each level, top-down predictions are compared with bottom-up sensory signals; the mismatch (prediction error δ = s",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-surprisal-n400-mismatch-equivalence"
      ],
      "communication_gap": "Computational neuroscience publications on predictive coding and free energy (Friston lab, Trends in Cognitive Sciences, PLOS Computational Biology) and formal linguistics / psycholinguistics publications (Language, Cognition, Journal of Memory and Language) occupy almost entirely disjoint citation networks. The surprisal theory of reading time is widely cited in psycholinguistics but rarely connected to free-energy minimization. Clark's (2013) \"Whatever next?\" synthesis reached cognitive science but has had limited uptake in formal linguistics. The mathematics is identical, but disciplinary vocabulary (precision vs. attention, free energy vs. surprisal, generative model vs. grammar) prevents recognition of the equivalence.\n",
      "translation_table": [
        {
          "field_a_term": "Generative model P(s|h) in predictive coding hierarchy",
          "field_b_term": "Probabilistic phrase structure grammar P(word | syntactic context)",
          "note": "Both define the prior that generates expected inputs from latent structure"
        },
        {
          "field_a_term": "Prediction error δ = s - μ (sensory signal minus prediction)",
          "field_b_term": "Surprisal = -log P(w_t | w_{1:t-1})",
          "note": "Both quantify the unexpectedness of an input; both drive belief updating"
        },
        {
          "field_a_term": "Precision-weighted prediction error π·δ",
          "field_b_term": "Attention-weighted processing of syntactic dependencies",
          "note": "Precision controls the gain on error signals; attention selects dependency arcs"
        },
        {
          "field_a_term": "Variational free energy F = complexity - accuracy",
          "field_b_term": "MDL grammar cost = description length + encoding cost",
          "note": "Formally equivalent trade-off between model complexity and empirical fit"
        },
        {
          "field_a_term": "Hierarchical latent causes (h₁, h₂, ...) in generative model",
          "field_b_term": "Hierarchical phrase structure (morpheme → word → phrase → clause)",
          "note": "The linguistic hierarchy is the natural decomposition of the neural hierarchy"
        },
        {
          "field_a_term": "Active inference — acting to confirm predictions (minimize free energy)",
          "field_b_term": "Syntactic priming — prior usage biases subsequent structural choices",
          "note": "Both represent the motor/production analog of the perceptual/parsing direction"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nrn2787",
          "note": "Friston (2010) Nat Rev Neurosci — \"The free-energy principle: a unified brain theory?\"; foundational statement of variational free energy and predictive coding\n"
        },
        {
          "url": "https://aclanthology.org/N01-1021",
          "note": "Hale (2001) NAACL — \"A probabilistic Earley parser as a psycholinguistic model\"; introduces surprisal as cognitive difficulty measure for syntactic processing\n"
        },
        {
          "doi": "10.1016/j.cognition.2007.05.006",
          "note": "Levy (2008) Cognition — \"Expectation-based syntactic comprehension\"; surprisal predicts reading times better than memory-based alternatives across 5 datasets\n"
        },
        {
          "doi": "10.1017/S0140525X12000477",
          "note": "Clark (2013) Behav Brain Sci — \"Whatever next? Predictive brains, situated agents, and the future of cognitive science\"; links predictive coding to language\n"
        },
        {
          "doi": "10.1080/23273798.2015.1102299",
          "note": "Kuperberg & Jaeger (2016) Lang Cogn Neurosci — \"What do we mean by prediction in language comprehension?\"; empirical review bridging predictive processing and linguistics\n"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/neuroscience-linguistics/b-predictive-coding-grammar.yaml"
    },
    {
      "id": "b-connectome-graph-laplacian-spectral",
      "title": "Connectome topology encodes functional brain states via graph Laplacian eigenspectra: the spectral gap predicts synchronization capacity and network segregation",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The connectome—the complete wiring diagram of neural connections—is a weighted undirected graph G=(V,E,W) whose Laplacian L=D-W has eigenvalues 0=λ₁≤λ₂≤...≤λₙ. The algebraic connectivity λ₂ (Fiedler value) quantifies synchronization capacity: large λ₂ implies rapid information spreading, while small",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-connectome-graph-laplacian-spectral"
      ],
      "communication_gap": "Neuroimaging researchers use tractography and graph metrics (clustering coefficient, path length) without applying the full spectral machinery from algebraic graph theory. The Fiedler value and spectral gap are standard tools in network mathematics but rarely appear in neuroscience journal articles, partly because tractography data require thresholding decisions that affect eigenspectra, creating methodological uncertainty.\n",
      "translation_table": [
        {
          "field_a_term": "white-matter fiber tract weights W_{ij}",
          "field_b_term": "edge weights in the symmetric weighted graph adjacency matrix",
          "note": "Diffusion MRI tractography gives W_{ij}; graph theory then applies directly"
        },
        {
          "field_a_term": "resting-state functional network (DMN, visual, motor)",
          "field_b_term": "spectral community from Fiedler eigenvectors of graph Laplacian",
          "note": "Spectral clustering using first k eigenvectors of L partitions connectome into functional communities"
        },
        {
          "field_a_term": "neural synchronization capacity",
          "field_b_term": "algebraic connectivity λ₂ (Fiedler value)",
          "note": "λ₂ determines the rate of consensus in diffusive processes on the graph"
        },
        {
          "field_a_term": "long-range white-matter hub regions",
          "field_b_term": "nodes with high eigenvector centrality in the graph Laplacian",
          "note": "Hubs have disproportionate influence on low-frequency Laplacian modes"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nrn2575",
          "note": "Bullmore & Sporns (2009) Complex brain networks — graph theory analysis of structural and functional systems. Nature Rev Neurosci 10:186"
        },
        {
          "doi": "10.1093/brain/awq263",
          "note": "Honey et al. (2010) Can structure predict function in the human brain? Brain 133:2121"
        },
        {
          "doi": "10.1016/j.neuroimage.2013.05.041",
          "note": "Zalesky et al. (2010) Whole-brain anatomical networks — graph spectral analysis. NeuroImage 54:1132"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-mathematics/b-connectome-graph-laplacian-spectral.yaml"
    },
    {
      "id": "b-consciousness-integrated-information-theory-phi",
      "title": "Integrated Information Theory (IIT) proposes that consciousness corresponds to integrated information Φ — a measure of how much a system generates information above and beyond its parts — connecting neuroscience to information theory, statistical mechanics, and the mathematics of causal structure.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "IIT (Tononi 2004, 2014) defines Φ as the minimum information generated by a system as a whole beyond its minimum information partition (MIP). Mathematically, Φ is a measure over a causal structure (directed graph with conditional probability tables): Φ = min_{partition P} D_KL(p(X|do(causes)) || p_p",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-phi-maximum-thalamocortical-consciousness-locus"
      ],
      "communication_gap": "Neuroscientists studying consciousness and mathematicians / information theorists studying integrated information rarely collaborate; IIT has generated significant debate but the mathematical community working on information geometry and causal inference rarely engages with the neuroscience consciousness literature, and vice versa.\n",
      "translation_table": [
        {
          "field_a_term": "subjective experience / qualia (neuroscience)",
          "field_b_term": "integrated information Φ of the neural causal structure (mathematics)",
          "note": "IIT's central axiom — experience IS identical to the cause-effect structure with maximal Φ"
        },
        {
          "field_a_term": "level of consciousness / anesthetic depth (neuroscience)",
          "field_b_term": "magnitude of Φ (information theory)",
          "note": "Zap-complexity (TMS-EEG) is an empirical proxy for Φ that tracks consciousness"
        },
        {
          "field_a_term": "posterior cortical hot zone (neuroscience)",
          "field_b_term": "maximal-Φ subgraph (NPC) (mathematics)",
          "note": "IIT predicts consciousness localizes to the brain's highest-Φ subgraph"
        },
        {
          "field_a_term": "feed-forward vs. recurrent connectivity (neuroscience)",
          "field_b_term": "zero vs. positive Φ (information theory)",
          "note": "Pure feed-forward systems have Φ = 0 — they are not conscious per IIT"
        }
      ],
      "references": [
        {
          "doi": "10.1186/1471-2202-5-42",
          "note": "Tononi (2004) — an information integration theory of consciousness"
        },
        {
          "doi": "10.1038/nrn.2016.44",
          "note": "Tononi et al. (2016) — integrated information theory — from consciousness to its physical substrate"
        },
        {
          "doi": "10.1371/journal.pcbi.1003588",
          "note": "Oizumi et al. (2014) — from the phenomenology to the mechanisms of consciousness (IIT 3.0)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-mathematics/b-consciousness-integrated-information-theory-phi.yaml"
    },
    {
      "id": "b-dendritic-computation-compartmental-models",
      "title": "Dendrites are not passive cables but active nonlinear computational units, and compartmental cable theory maps the spatially distributed voltage dynamics of a dendritic tree onto a system of coupled ordinary differential equations — making single neurons multi-layer neural networks with nonlinear dendritic basis functions as the hidden layer.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Classic computational neuroscience modeled neurons as point processors (integrate- and-fire), but dendritic recordings reveal that dendrites perform active computation: NMDA receptor activation creates plateau potentials (local regenerative events), voltage-gated channels produce dendritic spikes, a",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "",
      "translation_table": [
        {
          "field_a_term": "cable equation lambda^2 * d^2V/dx^2 = tau_m * dV/dt + V, with lambda = sqrt(r_m / r_a) and tau_m = r_m * c_m",
          "field_b_term": "compartmental ODE system: C dV_i/dt = sum_j g_{ij}(V_j - V_i) + I_i",
          "note": "Exact equivalence — compartmental model is the spatially discretized cable equation"
        },
        {
          "field_a_term": "dendritic NMDA plateau potential",
          "field_b_term": "hidden unit with threshold nonlinearity",
          "note": "NMDA spike is all-or-none above threshold glutamate input — analogous to ReLU/sigmoid"
        },
        {
          "field_a_term": "electrotonic length lambda = sqrt(r_m / r_a)",
          "field_b_term": "effective coupling between compartments",
          "note": "Short lambda = weakly coupled compartments (independent computation); long lambda = integrated soma"
        },
        {
          "field_a_term": "somatic action potential threshold",
          "field_b_term": "output layer decision boundary",
          "note": "The soma sums dendritic contributions and fires if they exceed threshold"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF02459568",
          "note": "Rall (1967) — distinguishing theoretical synaptic potentials computed for different soma-dendritic distributions of synaptic input"
        },
        {
          "doi": "10.1016/j.neuron.2015.01.010",
          "note": "Poirazi & Papoutsi (2020) — illuminating dendritic function with computational models"
        },
        {
          "doi": "10.1126/science.7580517",
          "note": "Mainen & Sejnowski (1996) — influence of dendritic structure on firing pattern"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-mathematics/b-dendritic-computation-compartmental-models.yaml"
    },
    {
      "id": "b-hopfield-attractor-memory",
      "title": "Hopfield networks (1982) store M memories as energy-function attractors with Hebbian weights; statistical mechanics (Amit-Gutfreund-Sompolinsky) gives capacity M_max≈0.14N; modern Hopfield networks (Ramsauer 2020) achieve exponential capacity exp(N/2) using log-sum-exp interaction — mathematically equivalent to the scaled dot-product attention mechanism in transformers, connecting associative memory theory directly to large language models.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Hopfield networks (1982): N binary neurons sᵢ ∈ {-1,+1} with symmetric weights Wᵢⱼ = (1/N)Σ_μ ξᵐᵢ ξᵐⱼ (Hebb rule) and dynamics sᵢ(t+1) = sgn(Σⱼ Wᵢⱼsⱼ(t)). Energy E = -½Σᵢⱼ Wᵢⱼsᵢsⱼ decreases monotonically → convergence to attractors. Stored memories ξᵐ are the attractor fixed points. Amit-Gutfreund-S",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-modern-hopfield-transformer-attention-equivalence"
      ],
      "communication_gap": "The 1982 Hopfield paper was absorbed by the physics community (statistical mechanics analysis) and the neural network community but largely separated from the 2017 transformer revolution in deep learning. The mathematical equivalence between modern Hopfield networks and attention was published in 2020 (Ramsauer et al.) but has not yet widely penetrated neuroscience journals, even though it provides a biologically grounded theoretical framework for understanding transformer architectures.\n",
      "translation_table": [
        {
          "field_a_term": "Hopfield network synaptic weight matrix Wᵢⱼ",
          "field_b_term": "outer product of key-value pairs in transformer attention",
          "note": "Hebb rule Wᵢⱼ = (1/N)Σ_μ ξ_μᵢ ξ_μⱼ is identical to key-value memory outer product"
        },
        {
          "field_a_term": "energy function E = -½ sᵀWs",
          "field_b_term": "negative log-sum-exp interaction function in modern Hopfield",
          "note": "classical energy is quadratic; modern Hopfield energy is -log(Σexp(βξ·s))"
        },
        {
          "field_a_term": "pattern completion (partial cue → full memory)",
          "field_b_term": "attention query finding the most similar key",
          "note": "both perform nearest-neighbor retrieval in the space of stored patterns"
        },
        {
          "field_a_term": "storage capacity M_max = 0.14N (classical)",
          "field_b_term": "softmax attention over all N tokens (one-shot perfect retrieval)",
          "note": "classical has capacity limitation; softmax attention retrieves any of exp(N/2) items"
        },
        {
          "field_a_term": "spurious attractors (spin glass states above capacity)",
          "field_b_term": "hallucination in large language models (blended pattern retrieval)",
          "note": "both arise when memory density exceeds the network's faithful retrieval limit"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.79.8.2554",
          "note": "Hopfield (1982) Neural networks and physical systems with emergent collective computational abilities. PNAS 79:2554–2558"
        },
        {
          "doi": "10.1103/PhysRevA.32.1007",
          "note": "Amit, Gutfreund & Sompolinsky (1985) Spin-glass models of neural networks. Phys Rev A 32:1007–1018"
        },
        {
          "note": "Ramsauer et al. (2020) Hopfield Networks Is All You Need. ICLR 2021. arXiv:2008.02217"
        },
        {
          "note": "Krotov & Hopfield (2016) Dense associative memory for pattern recognition. Adv Neural Inf Process Syst 29"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-mathematics/b-hopfield-attractor-memory.yaml"
    },
    {
      "id": "b-meg-inverse-source-localization",
      "title": "Magnetoencephalography (MEG) source localization is an ill-posed electromagnetic inverse problem: the measured magnetic field distribution b = L*q admits infinitely many source configurations q, requiring regularization methods (minimum norm, LORETA, beamforming) that impose mathematical priors on source distributions to yield unique neurophysiologically plausible solutions",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The MEG forward problem b = L*q (b: measured field, L: lead-field matrix, q: dipole moments) is underdetermined because the 300-sensor measurement vector b has far fewer constraints than the ~10^4 cortical source locations in q; the inverse problem min_q ||b - L*q||^2 + λ*R(q) requires regularizatio",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Neuroscientists use MEG source localization as a black-box tool in neuroimaging pipelines while applied mathematicians study inverse problems and regularization theory in general; the specific challenges of MEG (non-stationary sources, correlated signals, anatomical constraints) require interdisciplinary expertise combining neuroscience anatomy and inverse problem mathematics.",
      "translation_table": [
        {
          "field_a_term": "MEG sensor signals b (neuroscience)",
          "field_b_term": "linear measurement equation b = L*q with underdetermined L (mathematics)",
          "note": "Lead-field matrix L encodes Maxwell's equations; each column is the field pattern of a unit dipole at one cortical location"
        },
        {
          "field_a_term": "neural source localization (neuroscience)",
          "field_b_term": "regularized linear inverse problem solution q̂ = (L^T*L + λ*R)^{-1}*L^T*b (mathematics)",
          "note": "Different R encodings represent different anatomical priors; λ controls regularization strength via L-curve or cross-validation"
        },
        {
          "field_a_term": "MEG spatial resolution limited by sensor count (neuroscience)",
          "field_b_term": "rank deficiency and null space of the lead-field matrix (mathematics)",
          "note": "Rank(L) ≤ 300 sensors; sources in the null space of L are invisible; deep sources have smaller lead-field columns"
        },
        {
          "field_a_term": "beamformer spatial filter for neural source imaging (neuroscience)",
          "field_b_term": "linearly constrained minimum variance (LCMV) adaptive filter (mathematics)",
          "note": "Beamformer w_r = (C^{-1}*L_r)/(L_r^T*C^{-1}*L_r) suppresses interference from competing brain regions"
        }
      ],
      "references": [
        {
          "doi": "10.1016/j.neuroimage.2003.10.036",
          "note": "Hämäläinen & Ilmoniemi (1994) - interpreting MEG data: the minimum norm inverse"
        },
        {
          "doi": "10.1109/TMI.2009.2021641",
          "note": "Hillebrand & Barnes (2003) - beamformer source analysis for MEG"
        },
        {
          "doi": "10.1016/j.neuroimage.2009.02.054",
          "note": "Wipf & Nagarajan (2009) - robust Bayesian estimation for MEG source localization"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-mathematics/b-meg-inverse-source-localization.yaml"
    },
    {
      "id": "b-meg-squid-forward-x-em-inverse-source",
      "title": "MEG/EEG forward modeling and SQUID magnetometry ↔ elliptic/inverse electromagnetic source problems in conducting media (neuroimaging ↔ applied mathematics)\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Magnetoencephalography measures magnetic fields outside the head produced by neural currents; SQUID arrays sample those fields at many locations. Recovering distributed current sources is a severely ill-posed inverse problem requiring regularization, subspace constraints, or sparsity priors—mathemat",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-squid-array-regularization-improves-meg-source-localization"
      ],
      "communication_gap": "Neuroimaging toolboxes hide PDE details; applied math EM groups rarely publish on cortical geometry. Noise models and physiological priors differ substantially from industrial EM inverse problems.\n",
      "translation_table": [
        {
          "field_a_term": "volume conductor Green function / lead field matrix L",
          "field_b_term": "forward operator mapping source currents to boundary fields",
          "note": "Same linear algebra skeleton as discrete EM forward models."
        },
        {
          "field_a_term": "SQUID sensor geometry and noise covariance",
          "field_b_term": "measurement operator weights in regularized inversion",
          "note": "Array design sets conditioning of L."
        },
        {
          "field_a_term": "depth-weighted minimum norm / beamformer output",
          "field_b_term": "Tikhonov / sparsity-regularized EM source reconstructions",
          "note": "Regularization choice sets spatial bias."
        }
      ],
      "references": [
        {
          "doi": "10.1016/0013-4694(87)90087-2",
          "note": "Sarvas (1987) — basic formulas for magnetic fields from currents in spherically symmetric conductors."
        },
        {
          "doi": "10.1098/rstb.1996.0126",
          "note": "Ilmoniemi et al. (1996) — MEG inverse problem and instrumentation context (Phil. Trans. R. Soc.)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-mathematics/b-meg-squid-forward-x-em-inverse-source.yaml"
    },
    {
      "id": "b-neuronal-avalanches-branching-process",
      "title": "Neuronal avalanches in cortex are critical branching processes: the branching parameter σ=1 at criticality produces power-law size and duration distributions with exponents τ=3/2, α=2",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A branching process is a stochastic model where each event (neuron firing) independently spawns k offspring events with expected number σ (branching parameter). At criticality σ=1, avalanche size S and duration T distributions follow power laws: P(S)~S^(−τ) with τ=3/2 and P(T)~T^(−α) with α=2, with ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-neuronal-avalanches-branching-process"
      ],
      "communication_gap": "Galton-Watson branching process theory is standard in probability and population genetics, but the connection to neural avalanche statistics was made only in 2003. Probability theorists have detailed results on branching process scaling (Harris 1963) that neuroscientists rediscover, including the exact exponents τ=3/2 and α=2. The criticality hypothesis remains contested in neuroscience partly because the statistical tests for power-law distributions are demanding and the branching parameter formulation is rarely used in neurophysiology journals.\n",
      "translation_table": [
        {
          "field_a_term": "branching parameter σ (probability theory)",
          "field_b_term": "ratio of active to activating electrodes in local field potential avalanche",
          "note": "σ = mean number of electrodes activated per activating electrode in LFP data"
        },
        {
          "field_a_term": "subcritical process σ<1 (dying branching process)",
          "field_b_term": "quiescent cortical state, small bounded avalanches",
          "note": "Deactivated or anesthetized cortex shows σ<1 and exponential size distributions"
        },
        {
          "field_a_term": "supercritical process σ>1 (explosive growth)",
          "field_b_term": "seizure dynamics in epilepsy — sustained runaway activity",
          "note": "Epileptiform discharges represent σ>1 dynamics; anticonvulsants reduce σ toward 1"
        },
        {
          "field_a_term": "Galton-Watson extinction probability q",
          "field_b_term": "probability that a single-neuron spike triggers a system-wide avalanche",
          "note": "At σ=1, q=1 (certain extinction) but mean avalanche size diverges"
        }
      ],
      "references": [
        {
          "doi": "10.1523/JNEUROSCI.23-35-11167.2003",
          "note": "Beggs & Plenz (2003) Neuronal avalanches in neocortical circuits. J Neurosci 23:11167 — founding paper of cortical criticality"
        },
        {
          "doi": "10.1038/nphys2478",
          "note": "Shew & Plenz (2013) The functional benefits of criticality in the cortex. Neuroscientist 19:88"
        },
        {
          "doi": "10.1007/978-3-642-51866-9",
          "note": "Harris (1963) The Theory of Branching Processes. Springer, Grundlehren der mathematischen Wissenschaften 119"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-mathematics/b-neuronal-avalanches-branching-process.yaml"
    },
    {
      "id": "b-persistent-homology-neural-representation",
      "title": "Topological data analysis of neural population activity reveals the geometry of cognitive maps — Betti numbers decode represented spaces without positional data",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The topology of space represented by a neural population can be read directly from the topology of the point cloud formed by population activity vectors, via persistent homology. Place cells encoding a circular track produce Betti₁ = 1 (one loop) in the neural activity point cloud. Grid cells encode",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-tda-cognitive-map-nontrivial-topology"
      ],
      "communication_gap": "Algebraic topology is not standard training for neuroscientists; the mathematical formalism of persistent homology requires graduate-level algebraic topology background. The neuroscience community adopted TDA tools through the Curto & Itskov (2008) paper but the full mathematical machinery remains inaccessible to most experimentalists.\n",
      "translation_table": [
        {
          "field_a_term": "neural population activity vector at time t",
          "field_b_term": "point in high-dimensional Euclidean space (point cloud)",
          "note": "Each time bin gives one point; the cloud samples the neural manifold"
        },
        {
          "field_a_term": "Betti numbers (beta_0, beta_1, beta_2) of the activity cloud",
          "field_b_term": "topological invariants (components, loops, voids) of represented space",
          "note": "beta_1 = 1 for circular track; beta_1 = 2, beta_2 = 1 for torus (grid cells)"
        },
        {
          "field_a_term": "persistence diagram (birth-death pairs)",
          "field_b_term": "multi-scale topological signature robust to noise",
          "note": "Long-lived features are signal; short-lived features are noise"
        },
        {
          "field_a_term": "Vietoris-Rips filtration",
          "field_b_term": "systematic merging of point cloud at increasing distance thresholds",
          "note": "Reveals topological structure at all spatial scales simultaneously"
        }
      ],
      "references": [
        {
          "doi": "10.1007/s00454-004-1146-y",
          "note": "Zomorodian & Carlsson (2005). Computing persistent homology. Discrete Comput Geom 33:249."
        },
        {
          "doi": "10.1371/journal.pcbi.1000205",
          "note": "Curto & Itskov (2008). Cell groups reveal structure of stimulus space. PLOS Comput Biol 4:e1000205."
        },
        {
          "doi": "10.1038/s41586-021-04268-7",
          "note": "Gardner et al. (2022). Toroidal topology of population activity in grid cells. Nature 602:123."
        },
        {
          "doi": "10.1090/S0273-0979-09-01249-X",
          "note": "Carlsson, G. (2009). Topology and data. Bull AMS 46:255."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-mathematics/b-persistent-homology-neural-representation.yaml"
    },
    {
      "id": "b-persistent-homology-neural-topology",
      "title": "Topological data analysis via persistent homology — tracking connected components, loops, and voids in simplicial complexes built from neural co-firing patterns across filtration scales — reveals topology-native structure in hippocampal population codes that geometry-based methods miss, providing a direct mathematical tool for understanding how neural manifolds encode behaviorally relevant variables.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Topological data analysis (TDA) applies algebraic topology to data clouds. The key tool is persistent homology: given a set of points (neurons), build a growing sequence of simplicial complexes (Čech or Vietoris-Rips filtration) by adding edges when neuron pair distances fall below threshold ε, tria",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-hippocampal-place-cell-population-topology-reflects-navigated-space-topology"
      ],
      "communication_gap": "Algebraic topology (persistent homology, simplicial complexes, Betti numbers) is graduate mathematics taught in algebraic topology courses. Neuroscientists analysing population codes typically use PCA, UMAP, or riemannian geometry (not algebraic topology). The TDA community publishes in Foundations of Computational Mathematics and SIAM Journal on Applied and Computational Topology, with limited reach to Neuron, Nature Neuroscience, or Journal of Neuroscience. The Giusti et al. (2015) PNAS paper is the canonical bridge paper but has been primarily cited within the computational neuroscience community, not absorbed into mainstream systems neuroscience.\n",
      "translation_table": [
        {
          "field_a_term": "Betti numbers β₀, β₁, β₂ (algebraic topology)",
          "field_b_term": "Connected components, loops, voids in the neural co-firing complex",
          "note": "β₁ > 0 indicates a loop in the neural population code — e.g. circular track"
        },
        {
          "field_a_term": "Filtration parameter ε (persistent homology)",
          "field_b_term": "Co-firing threshold for including neural pairs in the simplicial complex",
          "note": "Persistent features across a wide range of ε are robust neural topology"
        },
        {
          "field_a_term": "Persistence diagram (birth-death pairs)",
          "field_b_term": "Multi-scale topological fingerprint of the neural population code",
          "note": "Points far from the diagonal = long-lived topological features = true signal"
        },
        {
          "field_a_term": "Clique complex of co-firing graph (mathematics)",
          "field_b_term": "Functional connectivity complex of neural ensemble",
          "note": "Cliques in co-firing graph = simplices in the neural topological complex"
        },
        {
          "field_a_term": "Bottleneck distance between persistence diagrams",
          "field_b_term": "Metric comparing neural topology across sessions, animals, or brain states",
          "note": "Stable metric on topological signatures — allows statistical testing"
        },
        {
          "field_a_term": "Euler characteristic χ = β₀ - β₁ + β₂ (topology)",
          "field_b_term": "Topological summary statistic of neural population code",
          "note": "Cheaper to compute than full persistent homology; useful as a rapid screening statistic"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.1506407112",
          "note": "Giusti, Pastalkova, Curto & Itskov (2015) Clique topology reveals intrinsic geometric structure in neural correlations, PNAS 112:13455 — first demonstration of β₁ topology in hippocampal place cell population\n"
        },
        {
          "doi": "10.1090/bull/1554",
          "note": "Curto, Itskov, Veliz-Cuba & Youngs (2017) The neural ring: an algebraic tool for analyzing the intrinsic structure of neural codes, Bull Am Math Soc 54:201 — clique topology and decodability theory\n"
        },
        {
          "doi": "10.1038/s41467-018-04508-5",
          "note": "Ziegler, Bhatt, Bhatt et al. (2018) Topological organization of the entorhinal cortex, Nat Commun 9:5270 — topology of EC population codes and memory consolidation\n"
        },
        {
          "doi": "10.1090/S0273-0979-09-01249-X",
          "note": "Carlsson (2009) Topology and data, Bull Am Math Soc 46:255 — foundational review of persistent homology and TDA for data analysis\n"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-mathematics/b-persistent-homology-neural-topology.yaml"
    },
    {
      "id": "b-spike-sorting-dimensionality-reduction",
      "title": "Multi-electrode array spike sorting — extracting individual neuron activity from high-density recordings — is a dimensionality reduction problem whose solution reveals that neural population activity lives on a low-dimensional manifold embedded in high-dimensional firing-rate space.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Modern Neuropixels probes record from 384–960 electrodes simultaneously, capturing spikes from hundreds of neurons. Spike sorting — attributing voltage deflections to individual neurons — proceeds as: (1) bandpass filter 300–6000 Hz; (2) threshold crossing detection; (3) feature extraction (PCA of s",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-neural-manifold-geometry-encodes-cognitive-map"
      ],
      "communication_gap": "Experimental neuroscientists who collect Neuropixels data are often not trained in manifold learning mathematics; machine learning researchers who develop UMAP and related algorithms rarely have access to neural datasets or neuroscience intuition. Spike sorting software (Kilosort, MountainSort) is used as a black box by most experimental labs; the statistical assumptions (Gaussian mixture models, template matching) are not widely understood. The manifold hypothesis remains a framework rather than a theorem — its precise mathematical form is debated.\n",
      "translation_table": [
        {
          "field_a_term": "spike waveform in electrode space (64-d feature vector)",
          "field_b_term": "point in waveform manifold — clustered by neuron identity",
          "note": "PCA of waveforms identifies 2-3 dimensions capturing most variance across waveform types"
        },
        {
          "field_a_term": "population firing rate vector r(t) ∈ ℝⁿ",
          "field_b_term": "point on neural population manifold at time t",
          "note": "The manifold is defined by the constraints imposed by synaptic connectivity"
        },
        {
          "field_a_term": "PCA (linear dimensionality reduction)",
          "field_b_term": "principal modes of population activity / functional connectivity",
          "note": "First PC often corresponds to average population firing rate; higher PCs to task-relevant dimensions"
        },
        {
          "field_a_term": "UMAP / Riemannian manifold learning",
          "field_b_term": "nonlinear neural manifold visualization preserving global topology",
          "note": "UMAP from McInnes (2018) outperforms t-SNE for large neural datasets by preserving global structure"
        },
        {
          "field_a_term": "intrinsic dimensionality d of the manifold",
          "field_b_term": "behavioral degrees of freedom encoded in population",
          "note": "Cunningham & Yu (2014): intrinsic dimensionality of motor cortex activity is 3-10 dimensions"
        }
      ],
      "references": [
        {
          "doi": "10.1016/j.brainresbull.2015.04.007",
          "note": "Rey et al. (2015) — Past, present and future of spike sorting techniques, Brain Res Bull 119:106"
        },
        {
          "doi": "10.1038/nature24636",
          "note": "Jun et al. (2017) — Fully integrated silicon probes for high-density recording, Nature 551:232"
        },
        {
          "note": "McInnes et al. (2018) — UMAP: Uniform Manifold Approximation and Projection, arXiv:1802.03426"
        },
        {
          "doi": "10.1038/nn.3776",
          "note": "Cunningham & Yu (2014) — Dimensionality reduction for large-scale neural recordings, Nat Neurosci 17:1500"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-mathematics/b-spike-sorting-dimensionality-reduction.yaml"
    },
    {
      "id": "b-topological-neuroscience",
      "title": "The geometric and topological structure of neural population activity manifolds can be characterised by algebraic topology — Betti numbers computed via persistent homology reveal the topology of cognitive representations, hippocampal place cells form a topological map of space, and grid cells tile the plane with hexagonal symmetry corresponding to torus topology.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Neural activity exists in high-dimensional space (one dimension per neuron), but the activity patterns activated by natural stimuli lie on low-dimensional manifolds. Algebraic topology — specifically persistent homology — can characterise the shape (topology) of these manifolds without knowing the u",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-betti-numbers-cognitive-complexity"
      ],
      "communication_gap": "Algebraic topology was developed by mathematicians (Poincaré, Brouwer, Eilenberg) without application to neuroscience in mind. The application of persistent homology to neural data was pioneered by Curto & Itskov (2008) in a computational biology paper that is not widely read by experimental neuroscientists. Most neuroscientists analyse neural population data using PCA or linear decoding, which cannot detect topological structure. The grid cell torus result (Gardner et al. 2022) appeared in Nature and received wide attention, but the persistent homology methods required to reproduce it are not in the standard neuroscience toolkit. Topology courses are not taught in neuroscience PhD programs.\n",
      "translation_table": [
        {
          "field_a_term": "Simplicial complex (from co-firing patterns)",
          "field_b_term": "Topological model of the neural code",
          "note": "Two cells form a 1-simplex (edge) if they co-fire; three cells form a 2-simplex (triangle) if they co-fire, etc."
        },
        {
          "field_a_term": "Betti numbers β_k",
          "field_b_term": "Topological invariants of the encoded cognitive space",
          "note": "β₀ = components, β₁ = loops, β₂ = voids; match the Betti numbers of the represented environment"
        },
        {
          "field_a_term": "Persistent homology barcode",
          "field_b_term": "Scale-robust topological signature of neural population activity",
          "note": "Long bars = robust topological features; short bars = noise; bars at dimension k encode k-dimensional holes"
        },
        {
          "field_a_term": "Torus manifold T²",
          "field_b_term": "Population activity geometry of grid cells in 2D environment",
          "note": "The two independent S¹ circles of T² correspond to two spatial periods of the hexagonal grid pattern"
        },
        {
          "field_a_term": "Euler characteristic χ = Σ (-1)^k β_k",
          "field_b_term": "Topological signature of the neural manifold",
          "note": "For a torus: χ = 0; for a sphere: χ = 2; for a point: χ = 1"
        },
        {
          "field_a_term": "Neural ring (Stanley-Reisner ring)",
          "field_b_term": "Algebraic characterisation of which co-firing patterns encode convex place fields",
          "note": "Algebraic structure determines which neural codes are geometrically realisable"
        },
        {
          "field_a_term": "Vietoris-Rips complex",
          "field_b_term": "Topological model built from pairwise neural distances",
          "note": "All cells within distance ε are connected into simplices; varying ε generates the persistence diagram"
        }
      ],
      "references": [
        {
          "doi": "10.1371/journal.pcbi.1000205",
          "note": "Curto & Itskov (2008) PLOS Comput Biol 4:e1000205 — cell groups reveal structure of stimulus space"
        },
        {
          "doi": "10.1038/s41586-021-04268-7",
          "note": "Gardner et al. (2022) Nature 602:123 — toroidal topology of population activity in grid cells"
        },
        {
          "doi": "10.1371/journal.pcbi.1002581",
          "note": "Dabaghian et al. (2012) PLOS Comput Biol 8:e1002581 — topological model of hippocampal activity"
        },
        {
          "doi": "10.1090/S0273-0979-09-01249-X",
          "note": "Carlsson (2009) Bull AMS 46:255 — topology and data; introduction to persistent homology"
        },
        {
          "doi": "10.1038/nature03687",
          "note": "Quiroga et al. (2005) Nature 435:1102 — invariant visual representation by single neurons in human medial temporal lobe"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/neuroscience-mathematics/b-topological-neuroscience.yaml"
    },
    {
      "id": "b-connectome-neurodegeneration",
      "title": "Graph-theoretic measures of brain connectome topology (clustering coefficient, path length, hub vulnerability) that characterize healthy neural networks predict neurodegenerative disease progression and clinical treatment targets in Alzheimer's, Parkinson's, and epilepsy.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Network neuroscience applies graph theory to the brain's connectome — the wiring diagram of structural and functional connections between regions. The same measures used to characterize small-world networks in physics (clustering coefficient C, characteristic path length L, hub centrality) directly ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-scale-free-criticality-brain-hub-vulnerability",
        "h-hopfield-alzheimers-glass-transition"
      ],
      "communication_gap": "Clinical neurology and network neuroscience publish in largely non-overlapping journals (Lancet Neurology, JAMA Neurology vs. Nature Neuroscience, Network Neuroscience). Graph-theoretic terminology (spectral gap, Laplacian eigenvectors, rich-club) is unfamiliar to clinicians trained on anatomy and pharmacology. MRI tractography data required for structural connectomics is available in most academic medical centers but is rarely analyzed with graph tools outside specialized computational labs. Clinical trial endpoints (cognitive tests, PET staging) do not yet include connectome metrics, preventing validation in large powered cohorts.\n",
      "translation_table": [
        {
          "field_a_term": "clustering coefficient C",
          "field_b_term": "local recurrent connectivity density in cortical circuits; elevated in seizure onset zones",
          "note": "High C with low L defines small-world topology; disease disrupts this balance"
        },
        {
          "field_a_term": "characteristic path length L",
          "field_b_term": "mean axonal communication distance; increases in Alzheimer's white matter degeneration"
        },
        {
          "field_a_term": "hub node (high betweenness or eigenvector centrality)",
          "field_b_term": "default-mode network regions (precuneus, posterior cingulate) — earliest amyloid accumulation sites"
        },
        {
          "field_a_term": "network diffusion / graph Laplacian propagation",
          "field_b_term": "trans-synaptic spread of tau and alpha-synuclein along connectome edges"
        },
        {
          "field_a_term": "spectral gap (lambda_2 of graph Laplacian)",
          "field_b_term": "connectome resilience to lesioning; predicts cognitive reserve capacity"
        },
        {
          "field_a_term": "rich-club coefficient",
          "field_b_term": "high-cost, high-capacity cortical backbone attacked preferentially in neurodegeneration"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.1924041117",
          "note": "Raj et al. (2020) — network diffusion model predicts tau PET staging in Alzheimer's disease"
        },
        {
          "doi": "10.1038/nn.2208",
          "note": "Buckner et al. (2009) — default mode network hubs correspond to early amyloid deposition sites"
        },
        {
          "doi": "10.1523/JNEUROSCI.2242-16.2016",
          "note": "Lariviere et al. — epilepsy surgery outcome predicted by graph-theoretic ictogenicity"
        },
        {
          "doi": "10.1016/j.neuron.2019.07.056",
          "note": "Vogel et al. (2019) — data-driven subtypes of Alzheimer's tau spread follow connectome topology"
        },
        {
          "doi": "10.1016/j.neuroimage.2012.06.002",
          "note": "Stam (2014) — modern network analysis of EEG and MEG: theory and clinical applications"
        },
        {
          "doi": "10.1073/pnas.0701519104",
          "note": "Sporns et al. (2007) — identification and classification of hubs in brain networks"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/neuroscience-medicine/b-connectome-neurodegeneration.yaml"
    },
    {
      "id": "b-placebo-predictive-coding-bayesian-brain",
      "title": "The placebo effect is a mechanistic consequence of Bayesian predictive coding in the brain: top-down expectation signals from prior beliefs about treatment efficacy suppress bottom-up pain and symptom signals via hierarchical prediction error minimisation, making placebo magnitude a direct measure of prior strength in the brain's generative model.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The placebo effect — symptom relief from inert treatment — has been dismissed as a confound, but neuroscience reveals it as a feature of the brain's Bayesian predictive coding architecture. The predictive coding framework (Rao & Ballard 1999; Friston 2005) proposes the brain minimises prediction err",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "",
      "translation_table": [
        {
          "field_a_term": "prior P(effect | treatment)",
          "field_b_term": "patient expectation of treatment efficacy",
          "note": "The probability distribution over outcomes encoded in the brain before sensory input"
        },
        {
          "field_a_term": "prediction error (sensory - predicted)",
          "field_b_term": "difference between actual symptom intensity and expected symptom level",
          "note": "Minimised by descending modulation — placebo reduces upward error propagation"
        },
        {
          "field_a_term": "precision weighting",
          "field_b_term": "certainty of expectation (precise vs. vague treatment beliefs)",
          "note": "High-precision priors (e.g., opioid injection > sugar pill) produce larger placebo effects"
        },
        {
          "field_a_term": "Bayesian updating",
          "field_b_term": "conditioning (repeated placebo administration strengthens the effect)",
          "note": "Each treatment-relief pairing updates the prior, increasing future placebo magnitude"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nn.2112",
          "note": "Friston (2009) — the free energy principle; a unified brain theory"
        },
        {
          "doi": "10.1016/j.neuron.2005.12.023",
          "note": "Petrovic et al. (2005) — placebo and opioid analgesia — imaging a shared neuronal network"
        },
        {
          "doi": "10.1038/nrn1838",
          "note": "Wager & Atlas (2015) — the neuroscience of placebo effects"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-medicine/b-placebo-predictive-coding-bayesian-brain.yaml"
    },
    {
      "id": "b-predictive-coding-phenomenal-consciousness",
      "title": "Predictive coding frames perception as hierarchical Bayesian inference, bridging computational neuroscience to the hard problem of consciousness by proposing phenomenal experience as residual unresolved prediction error",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Predictive coding (Rao & Ballard 1999; Friston 2010; Clark 2013) proposes that the brain is a hierarchical Bayesian prediction machine: top-down predictions cancel bottom-up sensory signals, with only prediction errors propagating upward for belief updating. This framework unifies perception, attent",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-predictive-coding-precision-weighting-explains-attention-consciousness"
      ],
      "communication_gap": "Philosophers of mind and computational neuroscientists rarely engage with each other's technical arguments. Philosophers lack mathematical fluency with Bayesian inference; neuroscientists avoid the hard problem as \"not scientific.\" The predictive coding bridge offers a scientific framework with genuine philosophical implications that both communities resist fully engaging with.\n",
      "translation_table": [
        {
          "field_a_term": "prediction error (PE) signal",
          "field_b_term": "phenomenal experience / qualia",
          "note": "Proposed identification — unresolved PE corresponds to conscious percept"
        },
        {
          "field_a_term": "prior (top-down prediction)",
          "field_b_term": "concept / expectation",
          "note": "Prior beliefs shape percepts just as concepts shape phenomenology"
        },
        {
          "field_a_term": "free energy minimisation (active inference)",
          "field_b_term": "minimisation of existential surprise",
          "note": "Friston extends free energy principle to model all purposive behaviour"
        },
        {
          "field_a_term": "precision weighting of PE",
          "field_b_term": "attentional gain / salience",
          "note": "High-precision PEs enter consciousness; low-precision ones do not"
        }
      ],
      "references": [
        {
          "note": "Friston (2010) — free energy principle and the brain",
          "doi": "10.1038/nrn2787"
        },
        {
          "note": "Clark (2013) — whatever next? Predictive brains, situated agents",
          "doi": "10.1017/S0140525X12000477"
        },
        {
          "note": "Chalmers (1996) The Conscious Mind — hard problem of consciousness"
        },
        {
          "note": "Hohwy (2013) The Predictive Mind — philosophy of predictive coding"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-philosophy/b-predictive-coding-phenomenal-consciousness.yaml"
    },
    {
      "id": "b-eeg-dipole-source-maxwell-equations",
      "title": "EEG source localization inverts the quasi-static electromagnetic forward problem: cortical current dipoles (synchronized postsynaptic potentials) generate scalp surface potentials governed by the quasi-static Maxwell equations in a heterogeneous conducting medium, making EEG source imaging a regularized inverse problem in applied electromagnetics\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Scalp EEG potentials are generated by primary current dipoles J^p (synchronized apical dendrite postsynaptic currents) embedded in brain tissue; the forward problem is governed by quasi-static Maxwell equations ∇·(σ∇φ) = ∇·J^p in conductivity σ(r), with boundary conditions on skull/scalp interfaces;",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-eeg-individualized-forward-model-epilepsy"
      ],
      "communication_gap": "Clinical EEG specialists interpret scalp waveforms without solving source localization while applied mathematicians develop inverse problem algorithms; the electromagnetic physics underlying EEG is not taught in most neuroscience programs, limiting uptake of quantitative source imaging methods in clinical practice.\n",
      "translation_table": [
        {
          "field_a_term": "cortical current dipole J^p (neuroscience)",
          "field_b_term": "primary current density source in quasi-static electromagnetic forward model (physics)",
          "note": "J^p represents synchronized postsynaptic currents in cortical macrocolumns; each dipole has location, orientation, and moment"
        },
        {
          "field_a_term": "scalp surface potential φ(r_electrode) (neuroscience)",
          "field_b_term": "solution to Poisson equation ∇·(σ∇φ) = ∇·J^p with Neumann BC at scalp (physics)",
          "note": "EEG measurement at N electrodes samples the solution to the forward electromagnetic problem"
        },
        {
          "field_a_term": "EEG inverse problem (neuroscience)",
          "field_b_term": "under-determined linear system L j = v where L is lead field matrix (physics)",
          "note": "Lead field matrix L maps each possible dipole source to predicted surface potential; N_electrodes << N_sources"
        },
        {
          "field_a_term": "LORETA / sLORETA source reconstruction (neuroscience)",
          "field_b_term": "L2 regularization of the electromagnetic inverse problem with Laplacian smoothness prior (physics)",
          "note": "LORETA solves min||Lj - v||² + λ||Wj||² with W = spatial Laplacian; equivalent to Tikhonov regularization"
        }
      ],
      "references": [
        {
          "doi": "10.1016/j.neuroimage.2006.01.032",
          "note": "Grech et al. (2008) - review of solving the inverse problem in EEG source analysis"
        },
        {
          "doi": "10.1016/S1388-2457(01)00571-1",
          "note": "Pascual-Marqui (2002) - standardized low-resolution brain electromagnetic tomography (sLORETA)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-physics/b-eeg-dipole-source-maxwell-equations.yaml"
    },
    {
      "id": "b-free-energy-principle-thermodynamics",
      "title": "Friston's Free Energy Principle in theoretical neuroscience is formally isomorphic to thermodynamic free energy minimisation in statistical mechanics: the KL divergence between approximate and true posterior plays the role of entropy, and active inference (action minimises surprise) is the biological analogue of thermodynamic relaxation toward equilibrium.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The thermodynamic free energy in statistical mechanics is F = U - TS, where U is internal energy, T is temperature, and S is entropy. A system at equilibrium minimises F, which is equivalent to maximising entropy subject to energy constraints (the Helmholtz principle).\nFriston's variational free ene",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-active-inference-thermodynamic-efficiency"
      ],
      "communication_gap": "Friston's FEP was developed in the theoretical neuroscience literature from 2005 onwards. Despite the explicit use of thermodynamic language (free energy, entropy, surprise), the connection to Jaynes' maximum- entropy statistical mechanics and non-equilibrium thermodynamics is not quantitative in most neuroscience applications. Thermodynamicists and statistical physicists are largely unaware of the FEP. The barrier is partly sociological (neuroscience versus physics journals) and partly mathematical (the variational Bayesian language is unfamiliar to physicists, while non-equilibrium thermo is unfamiliar to most neuroscientists).\n",
      "translation_table": [
        {
          "field_a_term": "Thermodynamic free energy F = U - TS",
          "field_b_term": "Variational free energy F = E_q[log q - log p]",
          "note": "Exact algebraic isomorphism: internal energy U ↔ E_q[-log p]; temperature-entropy TS ↔ entropy of beliefs S_q"
        },
        {
          "field_a_term": "Entropy S of thermodynamic system",
          "field_b_term": "Entropy H[q] = -E_q[log q] of the approximate posterior",
          "note": "Both measure the uncertainty / disorder of the system's state distribution"
        },
        {
          "field_a_term": "Equilibrium state (minimises F)",
          "field_b_term": "Perceptual inference (minimises F by updating beliefs)",
          "note": "Perception = equilibration of internal model to sensory evidence"
        },
        {
          "field_a_term": "Non-equilibrium work against disorder",
          "field_b_term": "Active inference (action changes sensory input to match predictions)",
          "note": "Both require energy expenditure to maintain low-entropy ordered states"
        },
        {
          "field_a_term": "Thermodynamic system boundary / adiabatic wall",
          "field_b_term": "Markov blanket separating agent from environment",
          "note": "Conditional independence = informational insulation = thermodynamic isolation"
        },
        {
          "field_a_term": "Boltzmann distribution p(x) ∝ exp(-E(x)/kT)",
          "field_b_term": "Posterior distribution p(φ|s) ∝ exp(-F(φ,s))",
          "note": "Variational free energy plays the role of scaled energy E(x)/kT"
        },
        {
          "field_a_term": "Schrödinger's negentropy (life fights entropy)",
          "field_b_term": "Self-evidencing (organisms minimise surprise to maintain homeostasis)",
          "note": "Equivalent formulations of the thermodynamic cost of life"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nrn2787",
          "note": "Friston (2010) Nat Rev Neurosci 11:127 — The free-energy principle: a unified brain theory"
        },
        {
          "doi": "10.1080/1047840X.2016.1201141",
          "note": "Friston et al. (2016) Psychol Inq 27:1 — Active inference and learning"
        },
        {
          "doi": "10.1103/PhysRev.106.620",
          "note": "Jaynes (1957) Phys Rev 106:620 — Information theory and statistical mechanics; maximum entropy derivation of thermodynamics"
        },
        {
          "note": "Schrödinger (1944) What is Life? — negentropy and the thermodynamics of living systems",
          "url": "https://archive.org/details/whatislife00schr"
        },
        {
          "doi": "10.1162/089976699300016953",
          "note": "Rao & Ballard (1999) Nat Neurosci 2:79 — predictive coding in the visual cortex; neural implementation of FEP"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/neuroscience-physics/b-free-energy-principle-thermodynamics.yaml"
    },
    {
      "id": "b-hodgkin-huxley-conductance",
      "title": "The Hodgkin-Huxley equations describe action potential generation as a system of nonlinear ODEs where ion channel conductances follow voltage-dependent gating kinetics, reducing neural excitability to measurable biophysical parameters",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Action potential generation in squid giant axon (and all neurons) is quantitatively described by C_m * dV/dt = -g_Na * m^3 * h * (V - E_Na) - g_K * n^4 * (V - E_K) - g_L * (V - E_L) + I, where m, h, n are voltage-dependent gating variables following first-order kinetics with rate constants measured ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "The Hodgkin-Huxley model is taught in both neuroscience and biophysics but the connection between voltage-clamp measurements, Nernst potentials, and the full nonlinear dynamics is rarely integrated; computational neuroscience treats it as a black box while biophysicists focus on individual channel kinetics without connecting to whole-neuron computation.",
      "translation_table": [
        {
          "field_a_term": "membrane capacitance C_m charging/discharging",
          "field_b_term": "parallel RC circuit with nonlinear conductance branches",
          "note": "C_m ~ 1 muF/cm^2 for biological membranes; time constant tau = RC sets spike width"
        },
        {
          "field_a_term": "Na+ channel activation gate m",
          "field_b_term": "dm/dt = alpha_m(V)(1-m) - beta_m(V)m; tau_m ~ 0.5 ms at -40 mV",
          "note": "m^3 nonlinearity causes threshold-like sodium current activation (cooperative gating)"
        },
        {
          "field_a_term": "Na+ channel inactivation gate h",
          "field_b_term": "dh/dt = alpha_h(V)(1-h) - beta_h(V)h; tau_h ~ 5 ms at -40 mV",
          "note": "h provides refractory period; h closes slowly after Na+ influx, preventing re-excitation"
        },
        {
          "field_a_term": "K+ channel activation gate n",
          "field_b_term": "dn/dt = alpha_n(V)(1-n) - beta_n(V)n; tau_n ~ 5 ms at -50 mV",
          "note": "n^4 nonlinearity delays K+ current; repolarizes membrane and produces afterhyperpolarization"
        }
      ],
      "references": [
        {
          "doi": "10.1113/jphysiol.1952.sp004764",
          "note": "Hodgkin & Huxley (1952) J Physiol - original HH model quantitatively describing action potential"
        },
        {
          "doi": "10.1113/jphysiol.1952.sp004717",
          "note": "Hodgkin & Huxley (1952) J Physiol - voltage clamp measurements of Na and K conductances"
        },
        {
          "doi": "10.1007/s00422-007-0178-y",
          "note": "Izhikevich (2007) Dynamical Systems in Neuroscience - bifurcation analysis of HH and reduced models"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-physics/b-hodgkin-huxley-conductance.yaml"
    },
    {
      "id": "b-holographic-memory-fourier-phase-encoding",
      "title": "Holographic memory models propose that the brain stores information as distributed interference patterns across neural assemblies, analogous to optical holography where images are encoded in the Fourier-domain phase of an interference pattern and reconstructed by coherent illumination\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "In optical holography, an object wavefront O(x) interferes with a reference beam R(x) to record the hologram H(x) = |O + R|² = |O|² + |R|² + O*R + OR*; reconstruction with R illumination recovers O as the first-order diffraction term, with the object distributed across the entire hologram so that an",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-hippocampal-population-holographic-capacity"
      ],
      "communication_gap": "Neuroscientists study memory using behavioral assays and electrophysiology while optical physicists study holographic storage in photorefractive materials; the neural holography analogy is known historically (Pribram 1969, Gabor) but is treated as a metaphor rather than a quantitative physical model in most neuroscience, lacking testable predictions that distinguish holographic from non-holographic neural memory.\n",
      "translation_table": [
        {
          "field_a_term": "synaptic weight matrix W_ij (neuroscience)",
          "field_b_term": "hologram H(x) recording interference of signal and reference wavefronts (optics)",
          "note": "Outer product Hebbian learning W += x x^T records the memory as a hologram; pattern completion is reconstruction"
        },
        {
          "field_a_term": "Hopfield network pattern completion (neuroscience)",
          "field_b_term": "holographic reconstruction: partial cue → full pattern (optics)",
          "note": "Partial activation of stored memory pattern triggers attractor dynamics recovering full pattern, as partial hologram reconstructs full image"
        },
        {
          "field_a_term": "hippocampal theta phase precession (neuroscience)",
          "field_b_term": "coherent phase reference for neural holographic encoding (optics)",
          "note": "Theta oscillation provides the reference wave; spike timing relative to theta phase encodes position/content as phase modulation"
        },
        {
          "field_a_term": "memory capacity C ~ 0.138 N bits (Hopfield) (neuroscience)",
          "field_b_term": "holographic storage capacity limited by diffraction efficiency and noise (optics)",
          "note": "Hopfield capacity limit C ≈ 0.138 N (cross-talk noise) is analogous to holographic SNR-limited storage capacity scaling"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0364-5412(82)90023-5",
          "note": "Hopfield (1982) - neural networks and physical systems with emergent collective computational abilities"
        },
        {
          "doi": "10.1038/s41583-022-00634-8",
          "note": "Quiroga (2023) - concept cells: the building blocks of declarative memory functions"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-physics/b-holographic-memory-fourier-phase-encoding.yaml"
    },
    {
      "id": "b-integrate-fire-stochastic-processes",
      "title": "The leaky integrate-and-fire neuron with noisy input is an Ornstein-Uhlenbeck process, making neural firing rate prediction equivalent to the first-passage time problem in stochastic physics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The leaky integrate-and-fire (LIF) neuron model, τ_m dV/dt = −(V − V_rest) + RI(t), with stochastic input I(t) = μ + σξ(t) (white noise), is exactly the Ornstein-Uhlenbeck (OU) process from stochastic physics. The membrane potential V(t) performs mean-reverting Brownian motion toward V_rest with noi",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-lif-decision-fatigue-ornstein-uhlenbeck"
      ],
      "communication_gap": "Computational neuroscience and stochastic physics use different notation and journals. The Fokker-Planck/Ornstein-Uhlenbeck connection to LIF models is known in theoretical neuroscience (Brunel 2000) but not widely taught in neuroscience graduate programs, where biophysics courses rarely cover stochastic differential equations.\n",
      "translation_table": [
        {
          "field_a_term": "membrane time constant τ_m",
          "field_b_term": "OU mean-reversion rate (friction)",
          "note": "Determines how quickly voltage decays to rest without input"
        },
        {
          "field_a_term": "threshold crossing",
          "field_b_term": "first-passage time to absorbing barrier",
          "note": "Neural firing = first time OU process hits threshold V_th"
        },
        {
          "field_a_term": "interspike interval distribution",
          "field_b_term": "first-passage time distribution of OU process",
          "note": "Analytically tractable via Fokker-Planck; ISI mean and variance predictable"
        },
        {
          "field_a_term": "population density equation (neural mass model)",
          "field_b_term": "Fokker-Planck PDE",
          "note": "Continuity equation in voltage space; describes population dynamics of LIF networks"
        }
      ],
      "references": [
        {
          "note": "Lapicque, L. (1907). Recherches quantitatives sur l'excitation électrique des nerfs traitée comme une polarisation. J Physiol Pathol Gen 9:620."
        },
        {
          "note": "Tuckwell, H.C. (1988). Introduction to Theoretical Neurobiology (Vol. 2). Cambridge University Press."
        },
        {
          "note": "Ricciardi, L.M. (1977). Diffusion Processes and Related Topics in Biology. Springer."
        },
        {
          "doi": "10.1023/A:1008925309027",
          "note": "Brunel, N. (2000). Dynamics of sparsely connected networks of excitatory and inhibitory spiking neurons. J Comput Neurosci 8:183."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-physics/b-integrate-fire-stochastic-processes.yaml"
    },
    {
      "id": "b-neural-avalanches-criticality",
      "title": "Spontaneous neuronal activity in the cortex exhibits power-law avalanche statistics matching mean-field critical branching process predictions, suggesting the brain operates at the edge of a second-order phase transition — a state that maximises dynamic range, information transmission, and computational repertoire simultaneously.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Self-organised criticality (SOC): Bak, Tang & Wiesenfeld (1987) discovered that many open dissipative systems naturally evolve toward a critical state characterised by power-law distributions, without fine-tuning of parameters. The canonical example is the sandpile model — adding grains one at a tim",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-neural-avalanche-criticality-dynamic-range"
      ],
      "communication_gap": "Statistical physics of phase transitions and SOC was developed in physics departments by condensed matter physicists (Bak, Kadanoff, Wilson). Neuroscientists studying cortical dynamics had no exposure to these concepts until Beggs & Plenz (2003) explicitly applied them. Even then, the neuroscience community was sceptical because: (1) power laws can arise from many mechanisms beyond SOC, (2) the specific exponents were not universally reproduced, and (3) the physiological mechanism maintaining σ ≈ 1 (synaptic homeostasis) was identified much later. The connection remains contested because verifying SOC universality class requires multiple scaling exponents — a standard requirement in physics but unfamiliar to most neuroscientists.\n",
      "translation_table": [
        {
          "field_a_term": "branching ratio σ (average recruits per active neuron)",
          "field_b_term": "criticality order parameter of the branching process"
        },
        {
          "field_a_term": "neuronal avalanche (cascade of correlated activations)",
          "field_b_term": "cluster in percolation theory / avalanche in sandpile model"
        },
        {
          "field_a_term": "power-law P(s) ∝ s^(-3/2) (avalanche size distribution)",
          "field_b_term": "mean-field critical branching process exponent (Galton-Watson)"
        },
        {
          "field_a_term": "subcritical cortex (σ < 1, anaesthesia, slow-wave sleep)",
          "field_b_term": "subcritical phase (finite correlation length, exponential decay)"
        },
        {
          "field_a_term": "supercritical cortex (σ > 1, seizure, epilepsy)",
          "field_b_term": "supercritical phase (runaway cascades, divergent cluster size)"
        },
        {
          "field_a_term": "synaptic plasticity (homeostatic maintenance of σ ≈ 1)",
          "field_b_term": "self-organised criticality (parameter-free critical state maintenance)"
        },
        {
          "field_a_term": "dynamic range of sensory cortex (input discrimination range)",
          "field_b_term": "susceptibility peak at the critical point"
        }
      ],
      "references": [
        {
          "doi": "10.1523/JNEUROSCI.5601-03.2003",
          "note": "Beggs & Plenz (2003) J Neurosci 23:11167 — neural avalanches in cortical cultures"
        },
        {
          "doi": "10.1038/nphys1746",
          "note": "Chialvo (2010) Nat Phys 6:744 — emergent complex neural dynamics at criticality"
        },
        {
          "doi": "10.1177/1073858412440101",
          "note": "Shew & Plenz (2013) Neuroscientist 19:88 — functional benefits of criticality"
        },
        {
          "doi": "10.1371/journal.pcbi.1003985",
          "note": "Priesemann et al. (2014) PLoS Comput Biol 10:e1003985 — in vivo cortex subcritical"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-physics/b-neural-avalanches-criticality.yaml"
    },
    {
      "id": "b-neural-binding-gamma-oscillations",
      "title": "The neural binding problem is proposed to be solved by gamma-band (30-100 Hz) oscillatory synchrony, linking the perceptual unification of distributed cortical representations to the physics of coupled oscillator synchronization.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "contested",
      "bridge_claim": "The binding problem (how the brain integrates distributed neural representations into unified percepts) maps onto the physics of synchronization in coupled oscillator networks: cortical gamma oscillations act as a carrier wave enabling phase-locking between distant neuronal assemblies, with synchron",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-gamma-oscillations-binding-causal-test"
      ],
      "communication_gap": "Neurophysiologists record gamma oscillations experimentally while physicists develop coupled oscillator theory; the binding-by-synchrony hypothesis has existed since Singer & Gray (1989) but remains contested partly because the quantitative mapping between neural data and oscillator theory models has not been rigorously tested.\n",
      "translation_table": [
        {
          "field_a_term": "gamma oscillation coherence (neuroscience)",
          "field_b_term": "order parameter r in Kuramoto model (physics)",
          "note": "PLV between cortical regions is the neural analog of the Kuramoto synchronization order parameter"
        },
        {
          "field_a_term": "neural assembly synchrony (neuroscience)",
          "field_b_term": "phase locking in coupled oscillators (nonlinear dynamics)",
          "note": "Synchronized firing corresponds to phase-locked states; desynchronization to incoherent phase drift"
        },
        {
          "field_a_term": "cross-frequency coupling theta-gamma (neuroscience)",
          "field_b_term": "nonlinear mode coupling / Arnold tongue (physics)",
          "note": "Nested oscillations arise from nonlinear coupling analogous to driven oscillator resonance"
        },
        {
          "field_a_term": "binding by synchrony (cognitive neuroscience)",
          "field_b_term": "entrainment and mutual coupling in oscillator networks (physics)",
          "note": "Perceptual binding requires stable entrainment of spatially distributed oscillators"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1126488",
          "note": "Fries (2005) - mechanism of gamma oscillations and neural communication through coherence"
        },
        {
          "doi": "10.1038/nrn2008",
          "note": "Uhlhaas & Singer (2006) - neural synchrony in brain disorders"
        },
        {
          "doi": "10.1016/j.neuron.2015.09.034",
          "note": "Buzsaki & Wang (2015) - mechanisms of gamma oscillations in cortex"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-physics/b-neural-binding-gamma-oscillations.yaml"
    },
    {
      "id": "b-neural-field-theory-brain-waves",
      "title": "Wilson-Cowan neural field equations are a biological reaction-diffusion system — dispersion relations predict EEG frequency bands as spatial-temporal resonances of excitatory-inhibitory cortical sheets",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Neural field theory (Wilson-Cowan 1972, Amari 1977) treats the cortex as a continuous excitable medium: population firing rates E(r,t) and I(r,t) obey integro-differential equations τ_E ∂E/∂t = -E + F[w_{EE}*E - w_{IE}*I + I_ext]. Linearization around the fixed point yields wave solutions with a dis",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-cortical-eigenmodes-universal-resting-state-basis"
      ],
      "communication_gap": "Theoretical neuroscientists working on neural field theory publish in Physical Review E, PLOS Computational Biology, and NeuroImage; cognitive and systems neuroscientists measure EEG/fMRI in Journal of Neuroscience, Neuron, and Nature Neuroscience. The mathematical physics framework is rarely cited in empirical neuroscience papers. Neural field theory training is absent from most neuroscience PhD programs, limiting adoption of the framework for interpreting resting-state imaging data.\n",
      "translation_table": [
        {
          "field_a_term": "neural firing rate field E(r,t) (neuroscience)",
          "field_b_term": "reaction-diffusion activator field (physics)",
          "note": "The Wilson-Cowan excitatory field plays the role of the activator in Turing-type systems"
        },
        {
          "field_a_term": "EEG frequency bands (alpha, theta, gamma)",
          "field_b_term": "normal modes of a driven oscillator (physics)",
          "note": "Each EEG band corresponds to a resonant mode of the cortical sheet with specific spatial wavenumber"
        },
        {
          "field_a_term": "connectome structural Laplacian (neuroscience)",
          "field_b_term": "graph Laplacian eigenmodes (mathematics/physics)",
          "note": "Resting-state fMRI networks are eigenmodes of the brain graph — analogous to vibration modes of a structure"
        },
        {
          "field_a_term": "conduction velocity of myelinated axons",
          "field_b_term": "wave propagation speed in dispersive medium",
          "note": "Finite conduction velocity introduces temporal delay — the source of propagating traveling waves in cortex"
        },
        {
          "field_a_term": "E-I balance (excitatory-inhibitory ratio)",
          "field_b_term": "damping ratio of oscillator (physics)",
          "note": "E-I ratio determines whether the cortical system is underdamped (oscillatory) or overdamped (non-oscillatory)"
        }
      ],
      "references": [
        {
          "doi": "10.1016/S0006-3495(72)86068-5",
          "note": "Wilson & Cowan (1972). Excitatory and inhibitory interactions in localized populations of model neurons. Biophys J 12:1."
        },
        {
          "doi": "10.1007/BF00337259",
          "note": "Amari (1977). Dynamics of pattern formation in lateral-inhibition type neural fields. Biol Cybern 27:77."
        },
        {
          "doi": "10.1016/j.neuroimage.2016.02.040",
          "note": "Robinson et al. (2016). Eigenmodes of brain activity: neural field theory predictions and comparison with experiment. NeuroImage 142:79."
        },
        {
          "doi": "10.1038/s41583-018-0074-y",
          "note": "Muller et al. (2018). Cortical travelling waves: mechanisms and computational principles. Nat Rev Neurosci 19:255."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-physics/b-neural-field-theory-brain-waves.yaml"
    },
    {
      "id": "b-neuronal-synchrony-ping-model",
      "title": "Gamma oscillations in cortical circuits emerge from the PING mechanism — Pyramidal-Interneuron Network Gamma — where excitatory cells drive fast-spiking interneurons that provide delayed inhibition, creating limit cycle oscillations that synchronise population activity; the same coupled oscillator physics describes Josephson junction arrays, laser synchronisation, and circadian pacemaker networks.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Cortical gamma oscillations (30-80 Hz) are thought to coordinate information processing across neural circuits. The PING model (Whittington et al. 1995; Traub et al. 1997) explains their generation: excitatory (E) pyramidal cells fire and recruit fast-spiking inhibitory (I) interneurons; the interne",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "",
      "translation_table": [
        {
          "field_a_term": "Hopf bifurcation",
          "field_b_term": "transition from asynchronous firing to gamma oscillation",
          "note": "Gamma oscillations emerge at a Hopf bifurcation as E drive or I coupling crosses a threshold"
        },
        {
          "field_a_term": "Kuramoto synchronisation order parameter",
          "field_b_term": "population synchrony (coherence of gamma oscillation across neurons)",
          "note": "r = 0 (asynchronous) to r = 1 (fully synchronous population gamma)"
        },
        {
          "field_a_term": "phase response curve (PRC)",
          "field_b_term": "sensitivity of a neuron's firing to inputs at different phases",
          "note": "The PRC determines whether E-I coupling produces synchronising or desynchronising interactions"
        },
        {
          "field_a_term": "Floquet exponents (limit cycle stability)",
          "field_b_term": "robustness of the gamma oscillation to perturbations",
          "note": "Negative Floquet exponents indicate stable gamma; positive indicates burst oscillation instability"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.92.24.11189",
          "note": "Whittington et al. (1995) — interneuron-based gamma oscillation in the hippocampus"
        },
        {
          "doi": "10.1103/PhysRevLett.57.1799",
          "note": "Kuramoto (1984/Winfree) — coupled oscillator synchronisation"
        },
        {
          "doi": "10.1016/j.neuron.2005.09.023",
          "note": "Bartos et al. (2007) — synaptic mechanisms of synchronized gamma oscillations in inhibitory interneuron networks"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-physics/b-neuronal-synchrony-ping-model.yaml"
    },
    {
      "id": "b-neuroplasticity-stdp",
      "title": "Spike-timing-dependent plasticity implements Hebbian learning through a physically measurable asymmetric time window that strengthens or weakens synapses based on millisecond-scale relative spike timing",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "STDP modifies synaptic conductance by an amount proportional to exp(-|dt|/tau) with sign determined by whether pre-synaptic firing precedes post-synaptic firing, implementing unsupervised Hebbian learning as a physical rule governed by calcium influx kinetics at NMDA receptors.",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Computational neuroscientists model STDP as abstract weight-update rules while biophysicists study calcium kinetics; the quantitative mapping between learning rules and molecular mechanisms is rarely made explicit across both literatures.",
      "translation_table": [
        {
          "field_a_term": "Hebbian coincidence detection",
          "field_b_term": "NMDA receptor voltage-and-ligand gating",
          "note": "Both require simultaneous pre- and post-synaptic activation; NMDA opens only when membrane is depolarized AND glutamate is bound"
        },
        {
          "field_a_term": "synaptic weight update delta-w",
          "field_b_term": "change in AMPA receptor conductance",
          "note": "Long-term potentiation (LTP) inserts AMPA receptors; LTD removes them; conductance change is the physical substrate of weight"
        },
        {
          "field_a_term": "STDP time window tau_plus ~ 20 ms",
          "field_b_term": "calcium transient decay constant",
          "note": "The ~20 ms window reflects the time course of calcium elevation following a back-propagating action potential"
        },
        {
          "field_a_term": "causal spike ordering (pre before post)",
          "field_b_term": "potentiation via calcium-calmodulin kinase II (CaMKII) activation",
          "note": "High calcium from near-coincident NMDA + bAP activates CaMKII, driving LTP"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.275.5297.213",
          "note": "Markram et al. (1997) Science - first quantitative demonstration of STDP in neocortical pyramidal neurons"
        },
        {
          "doi": "10.1523/JNEUROSCI.18-24-10464.1998",
          "note": "Bi & Poo (1998) J Neurosci - canonical STDP time window characterization in hippocampal neurons"
        },
        {
          "doi": "10.1038/nn.2479",
          "note": "Caporale & Dan (2008) Nat Rev Neurosci - STDP mechanisms and functional implications"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-physics/b-neuroplasticity-stdp.yaml"
    },
    {
      "id": "b-openalex-stat-mech-memory-gating",
      "title": "LSTM gating dynamics implement a statistical-mechanics memory system where forget and input gates function as temperature-controlled annealing schedules that determine whether the cell state crystallises (remembers) or melts (forgets) incoming information.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Long short-term memory networks (Hochreiter & Schmidhuber 1997, 96 k citations) solve the vanishing gradient problem via gating mechanisms that selectively control information flow through time. Statistical mechanics offers a precise physical interpretation: the cell state c_t is an order parameter;",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-lstm-gating-stat-mech-phase-transition"
      ],
      "communication_gap": "Computational neuroscientists and ML engineers are unaware of statistical mechanics formalisms for memory and phase transitions. Statistical physicists have not engaged with the empirical literature on LSTM gate behavior. Both communities independently discovered gating/order-parameter mechanisms without recognising the equivalence.\n",
      "translation_table": [
        {
          "field_a_term": "order parameter (magnetisation)",
          "field_b_term": "cell state c_t",
          "note": "The slowly-changing macroscopic variable that encodes long-term memory"
        },
        {
          "field_a_term": "temperature T",
          "field_b_term": "forget gate activation f_t (sigmoid output)",
          "note": "High f_t = disordered/forgetting; low f_t = ordered/remembering"
        },
        {
          "field_a_term": "external field H",
          "field_b_term": "input gate i_t times candidate cell g_t",
          "note": "Biases the order parameter toward new information"
        },
        {
          "field_a_term": "phase transition",
          "field_b_term": "sudden shift between memory and forgetting modes",
          "note": "Abrupt changes in f_t correspond to task-relevant context boundaries"
        },
        {
          "field_a_term": "free energy minimum",
          "field_b_term": "trained LSTM attractor state for a sequence context",
          "note": "The stable configuration the network settles into for a given input context"
        }
      ],
      "references": [
        {
          "doi": "10.1162/neco.1997.9.8.1735",
          "note": "Hochreiter & Schmidhuber (1997) Long Short-Term Memory — 96,432 citations; primary reference"
        },
        {
          "doi": "10.1162/089976602760128056",
          "note": "Hopfield network capacity and statistical mechanics of associative memory"
        },
        {
          "doi": "10.1371/journal.pcbi.1003441",
          "note": "Sussillo & Barak (2013) — opening the black box of recurrent neural networks"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-physics/b-openalex-stat-mech-memory-gating.yaml"
    },
    {
      "id": "b-photoreceptor-adaptation-weber-fechner-logarithmic",
      "title": "Photoreceptor light adaptation — the ability of rod and cone cells to maintain sensitivity across 10 orders of magnitude of light intensity — is explained by the Weber-Fechner law and logarithmic compression: the response is proportional to log(I/I₀), which maximizes information capacity given the biochemical noise floor and the statistics of natural scenes.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Weber's law states ΔI/I = k (the just-noticeable difference is a constant fraction of background). Fechner's integration gives perceived magnitude S = k·log(I/I₀). Biophysically, photoreceptor adaptation is implemented by Ca²⁺-dependent feedback on guanylyl cyclase and PDE: the Hill function respons",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-efficient-coding-theory-explains-entire-retinal-circuit-design"
      ],
      "communication_gap": "Visual neuroscientists studying photoreceptor biophysics and information theorists studying efficient neural coding share the logarithmic compression framework, but the full quantitative connection from biophysical Ca²⁺ feedback to mutual information maximization is rarely presented in either neuroscience or physics curricula; the efficient coding derivation (Laughlin 1981) is a key bridge paper that remains underutilized in photoreceptor biology.\n",
      "translation_table": [
        {
          "field_a_term": "light adaptation / gain control (neuroscience)",
          "field_b_term": "logarithmic input-output compression (physics/information theory)",
          "note": "Ca2+-mediated feedback shifts I_half to maintain sensitivity; equivalent to log compression"
        },
        {
          "field_a_term": "Weber's law ΔI/I = const (neuroscience)",
          "field_b_term": "multiplicative noise model σ ∝ μ (physics)",
          "note": "Weber's law implies photon shot noise at high intensities dominates psychophysical threshold"
        },
        {
          "field_a_term": "photoreceptor dynamic range (neuroscience)",
          "field_b_term": "information capacity under constraint (information theory)",
          "note": "Log compression maximizes bits transmitted per unit of metabolic cost given neural noise"
        },
        {
          "field_a_term": "dark-adapted vs light-adapted sensitivity (neuroscience)",
          "field_b_term": "operating point on logarithmic transfer curve (physics)",
          "note": "Dark adaptation resets I_half to lowest background; light adaptation shifts operating point up"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF00336124",
          "note": "Laughlin (1981) - a simple coding procedure enhances a neuron's information capacity"
        },
        {
          "doi": "10.1523/JNEUROSCI.0013-07.2007",
          "note": "Sampath & Rieke (2004) - occlusion of rhodopsin recovery and the regulation of light adaptation"
        },
        {
          "doi": "10.1146/annurev.neuro.28.061604.135730",
          "note": "Fain et al. (2001) - adaptation in vertebrate photoreceptors (review)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-physics/b-photoreceptor-adaptation-weber-fechner-logarithmic.yaml"
    },
    {
      "id": "b-sensory-adaptation-weber-fechner",
      "title": "Sensory perception bridges neuroscience and physics through Weber-Fechner psychophysics: the nervous system compresses physical stimulus intensity logarithmically (Fechner) or as a power law (Stevens), with the neural implementation explained by efficient coding theory — sensory neurons maximize mutual information between stimuli and responses given metabolic constraints, naturally producing logarithmic compression.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Weber's law (1834): the just noticeable difference ΔS for a stimulus of intensity S is proportional to S: ΔS/S = k (Weber fraction, constant per modality). For brightness, k ≈ 0.02; for weight, k ≈ 0.02; for sound frequency, k ≈ 0.003. Weber's law is a consequence of the signal-to-noise ratio of sen",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-efficient-coding-natural-statistics-sensory-cortex-universality"
      ],
      "communication_gap": "Weber and Fechner were 19th-century physiologists; Stevens was a 20th-century psychologist. The information-theoretic interpretation (Barlow 1961) came from a biologist writing at the intersection of cybernetics and neuroscience — a community that largely dissolved after the 1960s. Modern computational neuroscience (efficient coding, predictive coding) developed in the 1990s-2000s without strong connection to the classical psychophysics literature. Engineering (automatic gain control, logarithmic amplifiers) developed the same principles independently for radio and audio engineering. These three communities rarely cross-cite.\n",
      "translation_table": [
        {
          "field_a_term": "Weber fraction ΔS/S = k (psychophysics)",
          "field_b_term": "signal-to-noise ratio SNR = signal/(noise ∝ signal) = constant (physics)",
          "note": "Weber's law is the perceptual manifestation of multiplicative (Poisson-like) neural noise"
        },
        {
          "field_a_term": "Fechner's law Ψ = k·log(S/S₀)",
          "field_b_term": "decibel scale dB = 10·log₁₀(P/P₀) for sound/power (engineering acoustics)",
          "note": "decibel scale is exactly a Fechner scale — engineering independently discovered Fechner's law"
        },
        {
          "field_a_term": "Stevens' power law Ψ = k·Sⁿ (n ≠ 1)",
          "field_b_term": "power-law scaling in physics (fractal dimension, scaling relations near critical points)",
          "note": "power laws are scale-invariant; Stevens' law implies perception has no preferred reference point"
        },
        {
          "field_a_term": "shunting inhibition / divisive normalization (neural gain control)",
          "field_b_term": "automatic gain control (AGC) in radio receivers and amplifiers (engineering)",
          "note": "both dynamically adjust sensitivity to match dynamic range to current signal level"
        },
        {
          "field_a_term": "efficient coding / mutual information maximization (Barlow 1961)",
          "field_b_term": "channel capacity theorem (Shannon 1948) — optimal encoder maximizes I(X;Y)",
          "note": "efficient coding applies Shannon's channel capacity theory to biological neural codes"
        },
        {
          "field_a_term": "center-surround receptive field (lateral inhibition — retinal ganglion)",
          "field_b_term": "high-pass spatial filter / edge detection (image processing, signal processing)",
          "note": "lateral inhibition is the biological implementation of a spatial high-pass filter"
        }
      ],
      "references": [
        {
          "note": "Weber (1834) De Pulsu, Resorptione, Auditu et Tactu. Koehler, Leipzig"
        },
        {
          "note": "Fechner (1860) Elemente der Psychophysik. Breitkopf und Härtel, Leipzig"
        },
        {
          "doi": "10.1037/h0046500",
          "note": "Stevens (1957) On the psychophysical law; Psychol Rev 64:153"
        },
        {
          "note": "Barlow (1961) Possible principles underlying the transformation of sensory messages; in Rosenblith (ed.) Sensory Communication. MIT Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-physics/b-sensory-adaptation-weber-fechner.yaml"
    },
    {
      "id": "b-synaptic-plasticity-hebbian-learning",
      "title": "Hebb's postulate, formalized as Hebbian correlation learning (ΔW = η·xᵢ·xⱼ), requires BCM sliding-threshold stabilization and is mechanistically implemented by NMDA-receptor coincidence detection and spike-timing-dependent plasticity — bridging the statistical physics of associative memory with molecular neuroscience.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Hebb's (1949) postulate — \"neurons that fire together wire together\" — is formally expressed as ΔW_{ij} = η·xᵢ·xⱼ, a correlation-based learning rule that strengthens synaptic weight W_{ij} when pre-synaptic activity xⱼ and post-synaptic activity xᵢ co-occur. This rule is mathematically equivalent to",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-bcm-sliding-threshold-homeostatic-metaplasticity-cortical-map"
      ],
      "communication_gap": "The BCM rule and statistical physics of neural networks are published in journals like Physical Review Letters and Neural Computation, while the molecular neuroscience of NMDA receptors and CaMKII is in J Neuroscience, Neuron, and Nature Neuroscience. Computational neuroscientists rarely attend the same conferences as molecular neurobiologists. The Hopfield network (1982) and its connection to spin glasses (Amit et al. 1987) is known in physics but not widely understood by molecular neuroscientists. The STDP literature (mostly in experimental neuroscience) is rarely connected to the statistical learning theory community despite having deep formal links.\n",
      "translation_table": [
        {
          "field_a_term": "Hebbian synaptic weight update ΔW = η·xᵢ·xⱼ",
          "field_b_term": "Hopfield network energy minimization / Boltzmann machine learning"
        },
        {
          "field_a_term": "BCM sliding threshold θ_M ∝ ⟨xᵢ²⟩",
          "field_b_term": "homeostatic feedback / self-organized criticality regulation"
        },
        {
          "field_a_term": "NMDA receptor Mg²⁺ voltage gate (coincidence detection)",
          "field_b_term": "AND gate in Boolean logic / multiplicative nonlinearity"
        },
        {
          "field_a_term": "LTP (long-term potentiation, Ca²⁺ → CaMKII)",
          "field_b_term": "increase in coupling constant in statistical physics"
        },
        {
          "field_a_term": "STDP causal window (Δt > 0) → LTP",
          "field_b_term": "temporal asymmetry in learning rule / predictive coding"
        },
        {
          "field_a_term": "STDP acausal window (Δt < 0) → LTD",
          "field_b_term": "anti-Hebbian suppression of non-predictive correlations"
        },
        {
          "field_a_term": "triplet STDP (Pfister-Gerstner 2006)",
          "field_b_term": "higher-order correlations in statistical learning theory"
        }
      ],
      "references": [
        {
          "note": "Hebb (1949) The Organization of Behavior — Wiley (original postulate)"
        },
        {
          "doi": "10.1523/JNEUROSCI.02-01-00032.1982",
          "note": "Bienenstock et al. (1982) J Neurosci 2:32 — theory for the development of neuron selectivity (BCM rule)"
        },
        {
          "doi": "10.1126/science.275.5297.213",
          "note": "Markram et al. (1997) Science 275:213 — regulation of synaptic efficacy by coincidence of postsynaptic APs and EPSPs (STDP discovery)"
        },
        {
          "doi": "10.1523/JNEUROSCI.26-38-09673.2006",
          "note": "Pfister & Gerstner (2006) J Neurosci 26:9673 — triplets of spikes in a model of spike timing-dependent plasticity"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-physics/b-synaptic-plasticity-hebbian-learning.yaml"
    },
    {
      "id": "b-memory-consolidation-synaptic-tagging",
      "title": "Synaptic tagging and capture (Frey & Morris 1997) provides a cellular mechanism for associative memory consolidation: E-LTP sets a molecular \"tag\" at the synapse within minutes, while late LTP requires new protein synthesis from the cell body captured hours later, connecting the neuroscience of plasticity to the psychology of memory encoding and temporal associations.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Long-term potentiation (LTP) has two phases: early LTP (E-LTP, minutes, no new protein synthesis, PKA-dependent) and late LTP (L-LTP, hours to days, requires CREB-dependent transcription and new protein synthesis including Arc, BDNF, Homer). Frey & Morris (1997) demonstrated synaptic tagging and cap",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-sleep-rem-creative-insight-memory"
      ],
      "communication_gap": "STC was discovered by Frey & Morris (1997) in rodent hippocampal slices and remained primarily within molecular neuroscience. Cognitive psychologists studying memory consolidation (McGaugh, Nader) rarely cite STC; STC papers rarely cite the behavioral psychology literature on consolidation windows and emotional memory enhancement. Martin, Bhattacharya (behavioral tagging) explicitly bridged the in vitro molecular and behavioral literatures starting ~2003 but the synthesis has not fully reached clinical neuropsychology or educational psychology.\n",
      "translation_table": [
        {
          "field_a_term": "synaptic tag (activated CaMKII at weakly stimulated synapse)",
          "field_b_term": "memory trace or engram tag (psychology)",
          "note": "the molecular tag is the physical substrate of the psychological memory trace"
        },
        {
          "field_a_term": "plasticity-related proteins (PRPs — Arc, BDNF, Homer)",
          "field_b_term": "consolidation signal (psychology) — the process that converts short-term to long-term memory",
          "note": "PRP synthesis corresponds to the protein synthesis requirement for long-term memory consolidation"
        },
        {
          "field_a_term": "capture of PRPs by tagged synapse",
          "field_b_term": "associative memory binding (events consolidated together within temporal window)",
          "note": "STC temporal window (~1-2h) predicts the window for associative memory consolidation"
        },
        {
          "field_a_term": "L-LTP (synaptic weight change persisting >24h)",
          "field_b_term": "long-term memory formation (days to decades)",
          "note": "L-LTP is the cellular correlate of long-term memory; requires identical molecular machinery"
        }
      ],
      "references": [
        {
          "doi": "10.1113/jphysiol.1973.sp010273",
          "note": "Bliss & Lømo (1973) — Long-lasting potentiation of synaptic transmission; J Physiol 232:331"
        },
        {
          "doi": "10.1038/385533a0",
          "note": "Frey & Morris (1997) — Synaptic tagging and long-term potentiation; Nature 385:533"
        },
        {
          "doi": "10.1146/annurev.neuro.23.1.649",
          "note": "Martin et al. (2000) — Synaptic plasticity and memory; Annu Rev Neurosci 23:649"
        },
        {
          "doi": "10.1038/nrn3036",
          "note": "Redondo & Morris (2011) — Making memories last; Nat Rev Neurosci 12:17"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-psychology/b-memory-consolidation-synaptic-tagging.yaml"
    },
    {
      "id": "b-bat-echolocation-fm-pulse-compression-sonar",
      "title": "Bat echolocation uses frequency-modulated (FM) calls that are mathematically equivalent to FM pulse compression in radar/SONAR engineering: the linear frequency sweep creates a time-bandwidth product that enables range resolution far exceeding a simple tone pulse, and the auditory system computes the ambiguity function implicitly to localize prey.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "An FM chirp s(t) = A·cos(2π(f₀t + ½μt²)) (μ = chirp rate, BW = μ·T) has pulse compression ratio PCR = BW·T >> 1, giving range resolution δr = c/(2·BW) while retaining high energy (SNR = A²T/(2N₀)) from the long pulse duration T. The bat's auditory system effectively computes the matched filter cross",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-bat-echolocation-inspired-wideband-sonar-underwater"
      ],
      "communication_gap": "The mathematical equivalence between bat echolocation and radar signal processing was recognized by Simmons and Suga in the 1970s-80s, but neuroscientists studying auditory processing and signal processing engineers rarely collaborate; modern computational neuroscience of echolocation is rediscovering ambiguity function theory with renewed interest in neuromorphic sonar systems.\n",
      "translation_table": [
        {
          "field_a_term": "FM echolocation call (neuroscience)",
          "field_b_term": "linear frequency-modulated chirp waveform (signal processing)",
          "note": "Identical mathematical form; bat vocal-motor system generates the radar-equivalent waveform"
        },
        {
          "field_a_term": "biosonar range resolution (neuroscience)",
          "field_b_term": "pulse compression ratio / range resolution δr = c/2BW (signal processing)",
          "note": "Bat range resolution of ~1 mm corresponds to ~1 ms FM bandwidth of ~100 kHz"
        },
        {
          "field_a_term": "auditory delay-tuned neurons (neuroscience)",
          "field_b_term": "matched filter / correlator bank (signal processing)",
          "note": "Delay-tuned neurons in mustached bat's IC implement cross-correlation at specific delay τ"
        },
        {
          "field_a_term": "CF-FM bat vs FM-only bat (neuroscience)",
          "field_b_term": "velocity-optimized vs range-optimized radar waveform (signal processing)",
          "note": "CF component enables Doppler processing; FM component enables high range resolution"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.178.4059.449",
          "note": "Simmons (1972) - bat sonar echo delay resolution and acuity as a pulse-compression system"
        },
        {
          "doi": "10.1038/284462a0",
          "note": "Suga (1990) - cortical computational maps for auditory space and biosonar target features"
        },
        {
          "doi": "10.1121/1.381611",
          "note": "Kroszczynski (1969) - pulse compression by means of linear-period modulation (FM chirp theory)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-signal-processing/b-bat-echolocation-fm-pulse-compression-sonar.yaml"
    },
    {
      "id": "b-bci-optimal-decoding",
      "title": "Brain-computer interfaces achieve maximum information transfer rate when neural population activity is decoded using optimal Bayesian filters, connecting neuroscience spike train statistics to the signal processing framework of Kalman filtering and Fisher information bounds.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The problem of decoding motor intent from neural population activity is an optimal state estimation problem: spike trains from N neurons encode a low-dimensional movement state x(t) with Fisher information I(x) = sum_i (f'_i)^2 / f_i where f_i is each neuron's tuning curve; the Cramer-Rao bound 1/I(",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-bci-information-rate-fisher-bound"
      ],
      "communication_gap": "Neuroscientists analyze neural tuning and population codes while signal processing engineers develop optimal filtering algorithms; BCI researchers bridge these areas in practice but the theoretical unification via the Fisher-information/Cramer-Rao framework is not systematically taught in either neuroscience or engineering training.\n",
      "translation_table": [
        {
          "field_a_term": "neural population code (neuroscience)",
          "field_b_term": "observation model y = Cx + v in state-space signal processing",
          "note": "The tuning curve matrix C and noise covariance V define the measurement model for Kalman decoding"
        },
        {
          "field_a_term": "Fisher information of neural population (neuroscience)",
          "field_b_term": "Cramer-Rao bound on decoding error variance (signal processing)",
          "note": "Population Fisher information sets the minimum MSE achievable by any unbiased decoder"
        },
        {
          "field_a_term": "spike train variability / Fano factor (neuroscience)",
          "field_b_term": "measurement noise covariance matrix R (signal processing)",
          "note": "Poisson-like spike variability is the dominant noise source limiting BCI decoding accuracy"
        },
        {
          "field_a_term": "neural manifold / latent dynamics (neuroscience)",
          "field_b_term": "state-space model process equation x(t+1) = Ax(t) + w(t) (signal processing)",
          "note": "Low-dimensional neural dynamics are the process model; Kalman filter exploits this structure"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.7535infrastructure",
          "note": "Wu et al. (2006) - Gaussian-process factor analysis for BCI decoding"
        },
        {
          "doi": "10.1038/nn.2736",
          "note": "Shenoy, Sahani & Churchland (2013) - cortical control of arm movements and neural decoding"
        },
        {
          "doi": "10.1109/TNSRE.2004.835287",
          "note": "Wu, Gao & Shenoy (2004) - Kalman filter decoder for BCI achieving Cramer-Rao bound"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-signal-processing/b-bci-optimal-decoding.yaml"
    },
    {
      "id": "b-collective-intelligence-swarm",
      "title": "Collective Intelligence and Swarm Cognition — wisdom of crowds, bee quorum sensing, ant pheromone optimisation, and murmuration phase transitions link neuroscience to social decision-making",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Groups can exhibit collective intelligence exceeding individual expertise under specific conditions. The wisdom of crowds (Galton 1907): 787 estimates of an ox's weight at a county fair averaged to 1207 lbs — the true weight was 1198 lbs. Surowiecki (2004) identified four necessary conditions: indep",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Research on human collective intelligence (social psychology, economics, political science), animal swarm intelligence (ethology, evolutionary biology), and the physics of collective motion (statistical physics) are largely separate literatures. The mathematical connections — particularly the shared framework of phase transitions, positive feedback, and inhibition — are rarely made explicit. The neuroscience-social science bridge is weak: neuroscientists study individual decision-making; social scientists study group-level outcomes; the mechanistic level connecting neural circuits to social computation is largely unexplored.\n",
      "translation_table": [
        {
          "field_a_term": "independence, diversity, aggregation (wisdom of crowds conditions)",
          "field_b_term": "conditions for accurate collective judgment exceeding individual expertise",
          "note": "Violation of any condition (correlation, hierarchy, cascade) degrades collective intelligence below individual level"
        },
        {
          "field_a_term": "pheromone evaporation rate (ACO parameter)",
          "field_b_term": "exploration-exploitation balance in ant foraging",
          "note": "High evaporation rate: more exploration, slow convergence; low rate: fast convergence but risk of local optima"
        },
        {
          "field_a_term": "bee quorum threshold (N scouts at one site)",
          "field_b_term": "distributed decision rule without central controller",
          "note": "Quorum sensing is robust: increases threshold → slower but more accurate decisions; used for site quality discrimination"
        },
        {
          "field_a_term": "stop signal (head-butting competing dancing scouts)",
          "field_b_term": "lateral inhibition mechanism preventing commitment to suboptimal choice",
          "note": "Stop signal rate is proportional to dance vigor for competing sites — implements weighted voting with inhibition"
        },
        {
          "field_a_term": "topological interaction (align with 6–7 nearest neighbours)",
          "field_b_term": "scale-free correlation in murmuration enabling coherent collective response",
          "note": "Topological (not metric) neighbourhood makes flocking robust to density variation — perturbations propagate across entire flock"
        },
        {
          "field_a_term": "order-disorder phase transition in collective motion",
          "field_b_term": "ferromagnetic-like alignment transition in animal flocks",
          "note": "Vicsek model (1995): above critical density, local alignment rules produce spontaneous symmetry breaking → ordered flock"
        }
      ],
      "references": [
        {
          "note": "Galton (1907) Nature 75:450 — wisdom of crowds in weight estimation"
        },
        {
          "note": "Surowiecki (2004) The Wisdom of Crowds — conditions for collective intelligence"
        },
        {
          "doi": "10.1038/nature06832",
          "note": "Couzin (2009) Nature 445:715 — collective minds in animal groups"
        },
        {
          "doi": "10.1126/science.1210361",
          "note": "Seeley et al. (2012) Science 335:108 — stop signals and quorum sensing in honeybees"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-social-science/b-collective-intelligence-swarm.yaml"
    },
    {
      "id": "b-decision-neuroscience-neuroeconomics",
      "title": "Neuroeconomics bridges behavioral economics and decision neuroscience by mapping economic utility functions onto neural substrates: vmPFC encodes subjective value, anterior insula encodes aversion, the beta-delta model of intertemporal choice maps to differential limbic vs. dlPFC activation, and TPJ computes fairness in social decisions — moving economics from axiomatic to mechanistic.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Neuroeconomics (Rangel et al. 2008) is the project of finding the neural implementation of economic choice processes. Ventromedial PFC (vmPFC) encodes subjective value: BOLD signal in vmPFC correlates with willingness-to-pay across items, consistent with a common currency for reward value (Plassmann",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-beta-delta-neuroeconomics-dual-system"
      ],
      "communication_gap": "Behavioral economists rarely read Journal of Neuroscience; decision neuroscientists rarely engage with the full econometric toolkit of behavioral economics. The two communities use different methodological standards: economics requires incentive-compatible designs and structural models; neuroscience requires careful experimental controls and effect size estimation. The controversies (McClure dual-system model is contested; vmPFC reference dependence is debated) have been difficult to resolve because each community interprets the shared data through different theoretical frameworks and methodological lenses.\n",
      "translation_table": [
        {
          "field_a_term": "utility function U(x) (economics)",
          "field_b_term": "subjective value signal in vmPFC (neuroscience)",
          "note": "vmPFC BOLD signal is the neural correlate of the economic utility function — not fixed but reference-dependent"
        },
        {
          "field_a_term": "risk attitude (risk premium in expected utility)",
          "field_b_term": "anterior insula / amygdala activation under uncertainty",
          "note": "Insula activity predicts behavioral risk aversion independently of the expected value signal"
        },
        {
          "field_a_term": "hyperbolic discounting (beta-delta model)",
          "field_b_term": "limbic vs. PFC temporal competition (McClure model)",
          "note": "Beta-delta maps onto a dual-system neural architecture — now partially contested by single-system models"
        },
        {
          "field_a_term": "Ultimatum Game rejection (behavioral economics)",
          "field_b_term": "insula activation predicting rejection of unfair offers",
          "note": "Sanfey et al. (2003) showed insula activity predicts rejection — evidence of inequity aversion"
        },
        {
          "field_a_term": "reference-dependent preferences (Kahneman-Tversky)",
          "field_b_term": "reference-dependent value coding in vmPFC and striatum",
          "note": "Neural activity encodes gain/loss relative to reference point — consistent with prospect theory"
        },
        {
          "field_a_term": "Ellsberg paradox (ambiguity aversion)",
          "field_b_term": "amygdala activation for unknown (Knightian) vs. known probabilities",
          "note": "Amygdala activity is specifically elevated for ambiguous (unknown probability) choices"
        }
      ],
      "references": [
        {
          "note": "Plassmann et al. (2007) — Orbitofrontal cortex encodes willingness to pay in everyday economic transactions",
          "doi": "10.1523/JNEUROSCI.4117-06.2007"
        },
        {
          "note": "McClure et al. (2004) — Separate neural systems value immediate and delayed monetary rewards",
          "doi": "10.1126/science.1100907"
        },
        {
          "note": "Sanfey et al. (2003) — The neural basis of economic decision-making in the Ultimatum Game",
          "doi": "10.1126/science.1082976"
        },
        {
          "note": "Rangel, Camerer & Montague (2008) — A framework for studying the neurobiology of value-based decision making",
          "doi": "10.1038/nrn2357"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-social-science/b-decision-neuroscience-neuroeconomics.yaml"
    },
    {
      "id": "b-social-neuroscience-group-behavior",
      "title": "The mentalizing network (mPFC/TPJ/pSTS), social pain circuitry (dACC), and oxytocin-modulated trust form a neurobiological substrate for group-level social dynamics — social neuroscience makes the mechanisms of tribal economics, in-group cooperation, and social exclusion measurable as brain states.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Social neuroscience formalises the neural mechanisms underlying social behaviour that economists, sociologists, and political scientists have described at the group level, creating a multi-level account of cooperation, conflict, and inequality.\nMentalizing network (Theory of Mind): three core region",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-social-pain-dACC-health-outcomes-mediation"
      ],
      "communication_gap": "Social neuroscience emerged from neuroscience (looking for brain bases of social behaviour) while social science developed macro-level theories without neural constraint. Economists built formal models of trust and cooperation (game theory) without biological grounding. The Kosfeld et al. (2005) oxytocin paper appeared in Nature and created enormous media and scientific attention, but social scientists were often sceptical of reductionism — behavioural economics and social neuroscience have only partially integrated. The mirror neuron controversy has made neuroscientists cautious about strong claims, while social scientists interpret this as the entire bridging programme being suspect. The social baseline theory has not penetrated mainstream sociology or political science.\n",
      "translation_table": [
        {
          "field_a_term": "mPFC/TPJ/pSTS mentalizing network activation",
          "field_b_term": "Theory of Mind capacity (social understanding of others' beliefs)",
          "note": "fMRI localisation of a cognitive capacity studied for decades in developmental psychology"
        },
        {
          "field_a_term": "dACC activation during social exclusion",
          "field_b_term": "social pain (psychological experience of ostracism)",
          "note": "bridges neuroscience measurement to social psychological construct"
        },
        {
          "field_a_term": "oxytocin receptor density in nucleus accumbens and amygdala",
          "field_b_term": "in-group trust level in public goods games (economics)",
          "note": "neuropeptide system maps onto economic game-theoretic parameter"
        },
        {
          "field_a_term": "mirror neuron discharge pattern (observation + execution)",
          "field_b_term": "imitation learning and cultural transmission rate",
          "note": "contested: direct neuroscience-level explanation of anthropological observation"
        },
        {
          "field_a_term": "allostatic load reduction with social proximity (right PFC)",
          "field_b_term": "health benefits of social integration (epidemiology)",
          "note": "social baseline theory mechanistically explains Holt-Lunstad meta-analysis results"
        },
        {
          "field_a_term": "parochial altruism (oxytocin increases in-group, decreases out-group cooperation)",
          "field_b_term": "ethnic homogeneity effects on public goods provision (Putnam 2007)",
          "note": "same behavioural pattern described at different levels of analysis"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1130726",
          "note": "Frith & Frith (2006) Science 312:1016 — mentalizing network and Theory of Mind"
        },
        {
          "doi": "10.1126/science.1089134",
          "note": "Eisenberger et al. (2003) Science 302:290 — social exclusion activates dACC (social pain)"
        },
        {
          "doi": "10.1038/nature03701",
          "note": "Kosfeld et al. (2005) Nature 435:673 — oxytocin increases human trust"
        },
        {
          "doi": "10.1016/j.copsyc.2014.12.013",
          "note": "Coan & Sbarra (2015) Curr Opin Psychol 1:15 — social baseline theory"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-social-science/b-social-neuroscience-group-behavior.yaml"
    },
    {
      "id": "b-neuronal-avalanches-soc-power-law",
      "title": "Neuronal avalanches - cascades of neural activity with power-law size distributions - are proposed to arise from self-organised criticality: the cortex tunes itself to a critical point that maximises dynamic range, information capacity, and inter-area coordination, making SOC statistical physics the quantitative framework for understanding brain-wide signal propagation.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "contested",
      "bridge_claim": "Beggs & Plenz (2003) showed that LFP activity in cultured cortical slices exhibits avalanches with size distributions P(s) ~ s^{-3/2} and duration distributions P(T) ~ T^{-2}, matching the mean-field predictions of directed percolation at criticality. The branching ratio sigma (average number of neu",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Neuroscientists import SOC vocabulary without fully engaging with the statistical physics machinery for testing universality class and excluding confounds; statistical physicists rarely account for biophysical constraints (refractoriness, synaptic depression) that modify the critical point.\n",
      "translation_table": [
        {
          "field_a_term": "critical branching process (statistical physics)",
          "field_b_term": "cortical network at sigma = 1 (neuroscience)",
          "note": "Sigma ~ 1 in anesthetised and awake cortex; deviation from 1 correlates with brain state"
        },
        {
          "field_a_term": "power-law avalanche size distribution P(s) ~ s^{-3/2} (statistical physics)",
          "field_b_term": "local field potential avalanche size histogram (neuroscience)",
          "note": "Exponent -3/2 is mean-field directed percolation; deviations indicate finite-size or sub-sampling effects"
        },
        {
          "field_a_term": "dynamic range Delta (statistical physics)",
          "field_b_term": "range of stimulus intensities over which the cortex discriminates (neuroscience)",
          "note": "Theoretical maximum of Delta occurs at sigma = 1; measured dynamic range peaks near criticality"
        },
        {
          "field_a_term": "universality class (statistical physics)",
          "field_b_term": "invariance of avalanche exponents across recording methods and brain states (neuroscience)",
          "note": "Contested: different electrode arrays and species give different exponents"
        }
      ],
      "references": [
        {
          "doi": "10.1523/JNEUROSCI.23-35-11167.2003",
          "note": "Beggs & Plenz (2003) - neuronal avalanches in neocortical circuits; foundational paper"
        },
        {
          "doi": "10.1103/PhysRevLett.76.5357",
          "note": "Bak & Tang (1988) - self-organized criticality; original SOC framework"
        },
        {
          "doi": "10.1371/journal.pcbi.1002119",
          "note": "Priesemann et al. (2014) - spike avalanches in vivo suggest a driven, slightly subcritical brain state"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-statistical-physics/b-neuronal-avalanches-soc-power-law.yaml"
    },
    {
      "id": "b-bayesian-brain-predictive-processing",
      "title": "The brain implements approximate Bayesian inference — perception equals likelihood times prior divided by evidence — and neural populations encode probability distributions, making predictive processing (Helmholtz's unconscious inference) a formal instantiation of Bayes' theorem in cortical circuits.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Helmholtz (1867) proposed that perception is \"unconscious inference\" — the brain uses prior knowledge to resolve ambiguous sensory input. This informal insight has been formalised into the Bayesian brain hypothesis: sensory perception is the computation of the posterior\n\n  P(cause | sensation) ∝ P(s",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-predictive-processing-psychosis"
      ],
      "communication_gap": "Helmholtz's insight (1867) predated Bayesian statistics; the formal Bayesian brain formulation arrived with Knill & Pouget (2004) and Friston (2005) but remains specialised literature in computational neuroscience. Most experimental neuroscientists record single neurons rather than measuring population-level probabilistic codes; the statistical framework requires population recordings and Bayesian model fitting that most labs lack. The free energy principle literature (Friston) is mathematically dense and has a reputation for being difficult to interpret, slowing adoption by the experimental community.\n",
      "translation_table": [
        {
          "field_a_term": "Prior P(cause)",
          "field_b_term": "Neural prior — baseline expectation from past experience and top-down context",
          "note": "Encoded in the synaptic weights of top-down connections in cortical hierarchies"
        },
        {
          "field_a_term": "Likelihood P(sensation | cause)",
          "field_b_term": "Sensory evidence carried by bottom-up afferent signals",
          "note": "Precision-weighted prediction errors in layer II/III superficial pyramidal cells"
        },
        {
          "field_a_term": "Posterior P(cause | sensation)",
          "field_b_term": "Percept — the brain's inferred cause of the sensory input",
          "note": "Approximately computed by loopy belief propagation in cortical circuits"
        },
        {
          "field_a_term": "Bayesian updating (prior → posterior)",
          "field_b_term": "Perceptual learning — prior is updated by prediction errors over time",
          "note": "Corresponds to Hebbian-like synaptic changes driven by persistent prediction error"
        },
        {
          "field_a_term": "Precision (inverse variance 1/σ²)",
          "field_b_term": "Attention — relative gain of prediction error signals",
          "note": "High-precision likelihood = high attention to sensory channel; modulated by ACh and NE neuromodulators"
        },
        {
          "field_a_term": "Prediction error (residual)",
          "field_b_term": "Surprise signal — ascending from lower to higher cortical areas",
          "note": "Minimizing prediction error = perception + action; Free Energy Principle of Friston (2005)"
        },
        {
          "field_a_term": "Bayesian model selection (evidence)",
          "field_b_term": "Perceptual hypothesis testing — why does the brain choose one interpretation over another?",
          "note": "Model evidence = log marginal likelihood; the brain minimises free energy ≈ surprisal"
        }
      ],
      "references": [
        {
          "note": "Helmholtz (1867) Handbuch der physiologischen Optik — unconscious inference and perception as hypothesis testing",
          "url": "https://archive.org/details/handbuchderphysi00helm"
        },
        {
          "doi": "10.1038/nn741",
          "note": "Weiss, Simoncelli & Adelson (2002) Nat Neurosci 5:598 — motion perception as Bayesian inference; slow-speed prior"
        },
        {
          "doi": "10.1016/j.tins.2004.10.007",
          "note": "Knill & Pouget (2004) Trends Neurosci 27:712 — neural population codes as probability distributions"
        },
        {
          "doi": "10.1098/rstb.2005.1622",
          "note": "Friston (2005) Phil Trans R Soc B 360:1023 — free energy principle and predictive processing"
        },
        {
          "doi": "10.1038/nrn3136",
          "note": "Carandini & Heeger (2012) Nat Rev Neurosci 13:51 — divisive normalization as canonical computation"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/neuroscience-statistics/b-bayesian-brain-predictive-processing.yaml"
    },
    {
      "id": "b-neuroimaging-connectivity-graphical-models",
      "title": "Functional brain connectivity measured by fMRI BOLD signals is estimated using partial correlations and Gaussian graphical models (GGMs): the inverse covariance matrix Θ = Σ^{-1} encodes conditional independence structure where Θ_{ij} ≠ 0 iff brain regions i and j are directly connected controlling for all other regions, providing a sparse graph of functional brain networks",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The partial correlation between brain regions i and j (controlling for all other regions) equals -Θ_{ij}/√(Θ_{ii}*Θ_{jj}) where Θ = Σ^{-1} is the precision matrix of BOLD fMRI time series; estimating Θ with graphical lasso (L1 regularization: min_Θ[-log det Θ + tr(SΘ) + λ||Θ||_1]) produces a sparse ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Neuroscientists analyze fMRI connectivity using correlation matrices as neuroscience tools while statisticians develop graphical models and covariance estimation theory; the mathematical distinction between marginal and partial correlation (full vs precision matrix) is fundamental but often conflated in neuroimaging studies, leading to spurious connectivity reports.",
      "translation_table": [
        {
          "field_a_term": "functional brain connectivity (neuroscience)",
          "field_b_term": "non-zero entries in precision matrix Θ = Σ^{-1} (statistics)",
          "note": "Functional connectivity = correlation; effective connectivity from partial correlations = precision matrix structure"
        },
        {
          "field_a_term": "seed-based correlation analysis in fMRI (neuroscience)",
          "field_b_term": "marginal correlation (full correlation) ignoring all other variables (statistics)",
          "note": "Seed correlations include indirect paths; partial correlations (precision matrix) remove indirect associations"
        },
        {
          "field_a_term": "default mode network, salience network segregation (neuroscience)",
          "field_b_term": "community structure in sparse Gaussian graphical model (statistics)",
          "note": "Brain network modules correspond to densely connected subgraphs in the precision-matrix-estimated graph"
        },
        {
          "field_a_term": "dynamic functional connectivity (neuroscience)",
          "field_b_term": "non-stationary covariance estimation with sliding window or HMM (statistics)",
          "note": "Time-varying Σ(t) estimated by windowed graphical lasso captures state-dependent brain network configurations"
        }
      ],
      "references": [
        {
          "doi": "10.1093/biostatistics/kxm045",
          "note": "Friedman et al. (2008) - graphical lasso for sparse precision matrix estimation"
        },
        {
          "doi": "10.1016/j.neuroimage.2013.04.038",
          "note": "Smith et al. (2013) - functional connectomics from resting state fMRI"
        },
        {
          "doi": "10.1016/j.neuroimage.2010.10.003",
          "note": "Varoquaux & Craddock (2013) - learning and comparing functional connectomes"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/neuroscience-statistics/b-neuroimaging-connectivity-graphical-models.yaml"
    },
    {
      "id": "b-spike-sorting-blind-source-separation",
      "title": "Spike sorting — decomposing extracellular recordings into contributions from individual neurons — is mathematically identical to blind source separation (ICA/cocktail party problem), with Bayesian spike sorters implementing probabilistic mixture models over waveform shapes and interspike interval statistics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "EXTRACELLULAR RECORDING MIXING MODEL: A recording electrode at position x measures a weighted sum of spike waveforms from N nearby neurons:\n\n  y(t) = Σᵢ Aᵢ · sᵢ(t) + noise\n\nwhere Aᵢ = mixing matrix entry (spatial weight), sᵢ(t) = binary spike train of neuron i. This is the BSS mixing model.\nICA SOLU",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-spike-sorting-drift-correction-manifold"
      ],
      "communication_gap": "Neuroscientists developing spike sorters rarely read the BSS literature. Signal processing researchers do not engage with neuroscience spike sorting benchmarks. The mathematical isomorphism is noted in Lewicki (1998) but the fields remain largely separate. Deep learning spike sorters (Kilosort 2, YASS) rediscovered mixture-model approaches from the ML literature.\n",
      "translation_table": [
        {
          "field_a_term": "spike waveform template (neuroscience)",
          "field_b_term": "source signal basis function (signal processing)",
          "note": "Each neuron's waveform is the source signal; its detection across electrodes provides the unmixing"
        },
        {
          "field_a_term": "electrode recording (neuroscience)",
          "field_b_term": "mixture observation (blind source separation)",
          "note": "Multiple tetrode or silicon probe channels provide the equivalent of multiple microphones"
        },
        {
          "field_a_term": "neuron isolation / cluster assignment",
          "field_b_term": "source recovery in BSS",
          "note": "Assigning spikes to neurons = assigning observations to source components"
        },
        {
          "field_a_term": "interspike interval refractory period",
          "field_b_term": "temporal structure constraint on source",
          "note": "The refractory period (no two spikes <1ms apart) is a statistical constraint that helps BSS algorithms"
        }
      ],
      "references": [
        {
          "doi": "10.1162/neco.1995.7.6.1129",
          "note": "Bell & Sejnowski (1995) Neural Comput 7:1129 — ICA infomax algorithm"
        },
        {
          "doi": "10.1016/0165-0270(96)00052-4",
          "note": "Fee et al. (1996) J Neurosci Methods 69:175 — early spike sorting approach"
        },
        {
          "doi": "10.1101/061481",
          "note": "Pachitariu et al. (2016) bioRxiv — Kilosort Bayesian spike sorter"
        },
        {
          "note": "Lewicki (1998) Network 9:R53 — BSS review connecting to neuroscience"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/neuroscience-statistics/b-spike-sorting-blind-source-separation.yaml"
    },
    {
      "id": "b-a-stability-region-x-time-stepping-reaction-diffusion",
      "title": "A-stability and stiffness-aware time stepping connect numerical-analysis stability regions to physically faithful reaction-diffusion simulation under multiscale kinetics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Reaction-diffusion systems often combine fast reactive modes with slower transport scales, making explicit integrators unstable at practical timesteps. Stability-region analysis from numerical analysis directly determines which discretizations preserve qualitative dynamics without introducing spurio",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-imex-time-stepping-expands-stable-reaction-diffusion-cfl"
      ],
      "communication_gap": "Domain papers often tune timesteps empirically; numerical analysts emphasize spectral constraints. Bridging the two improves reproducibility and interpretability of simulated mechanisms.\n",
      "translation_table": [
        {
          "field_a_term": "A-stability / L-stability of time integrators",
          "field_b_term": "bounded simulation under stiff reaction eigenmodes",
          "note": "Integrator choice controls whether fast-decaying modes remain numerically well behaved."
        },
        {
          "field_a_term": "IMEX splitting",
          "field_b_term": "implicit treatment of stiff chemistry + explicit transport update",
          "note": "Splitting trades nonlinear solve cost for larger stable timestep windows."
        },
        {
          "field_a_term": "absolute stability region in complex plane",
          "field_b_term": "admissible dt relative to Jacobian spectrum",
          "note": "Spectral estimates provide actionable timestep control policies."
        }
      ],
      "references": [
        {
          "doi": "10.1017/S096249290200001X",
          "note": "Hairer (2002), long-time and structure-preserving insights relevant to stability-sensitive integration."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/numerical-analysis-physics/b-a-stability-region-x-time-stepping-reaction-diffusion.yaml"
    },
    {
      "id": "b-symbolic-regression-x-sparse-sensor-pde-structure-discovery",
      "title": "Sparse symbolic regression bridges numerical methods with experimental design by recovering parsimonious governing terms from limited measurements reminiscent of PDE discovery workflows.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Literature-backed methodology (SINDy family): sparse regression across candidate libraries can recover dynamical terms when noise and collinearity are controlled; speculative analogy for sparse sensing—sensor placement and bandwidth jointly determine identifiability similarly to experimental design ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-sparse-sensor-placement-improves-pde-structure-recovery"
      ],
      "communication_gap": "Symbolic regression papers emphasize success cases while experimental labs prioritize hardware constraints that shrink effective derivative SNR.",
      "translation_table": [
        {
          "field_a_term": "candidate function library",
          "field_b_term": "phenomenological term dictionary for PDEs",
          "note": "Library misspecification yields false positives."
        },
        {
          "field_a_term": "sparsity-promoting regression",
          "field_b_term": "parsimony priors / Occam criteria",
          "note": "Hyperparameters interact with noise floors."
        },
        {
          "field_a_term": "trajectory snapshots",
          "field_b_term": "sparse sensor time series",
          "note": "Coarsening changes finite-difference derivative estimates."
        }
      ],
      "references": [
        {
          "arxiv": "1509.03580",
          "note": "Sparse identification of nonlinear dynamics (SINDy) foundational reference."
        }
      ],
      "last_reviewed": "2026-05-09",
      "file": "cross-domain/numerical-analysis-physics/b-symbolic-regression-x-sparse-sensor-pde-structure-discovery.yaml"
    },
    {
      "id": "b-ocean-gyres-hamiltonian-chaos-kam-tori",
      "title": "Ocean gyre boundaries and Lagrangian coherent structures are governed by Hamiltonian chaos theory: KAM tori form transport barriers while chaotic seas drive mixing, mapping ocean circulation onto the mathematical theory of nearly-integrable Hamiltonian systems.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The 2-D incompressible ocean surface flow is a Hamiltonian system with the stream function ψ(x,y,t) as the Hamiltonian. In steady flow, streamlines are KAM tori — invariant curves that block cross-gyre transport. Time-dependence (tides, eddies) perturbs the system; KAM theory predicts that most tori",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-lcs-ftle-cross-gyre-pollutant-transport"
      ],
      "communication_gap": "Physical oceanographers and mathematical dynamicists have collaborated productively since the 1990s (Wiggins, Haller), but operational ocean forecasters rarely use Hamiltonian chaos diagnostics; FTLE computation remains computationally expensive and unfamiliar to most physical oceanography practitioners.\n",
      "translation_table": [
        {
          "field_a_term": "ocean stream function ψ(x,y,t) (oceanography)",
          "field_b_term": "Hamiltonian H(q,p,t) (dynamical systems)",
          "note": "Particle trajectories obey ẋ = ∂ψ/∂y, ẏ = −∂ψ/∂x — canonical Hamilton equations"
        },
        {
          "field_a_term": "gyre boundary / subtropical front (oceanography)",
          "field_b_term": "KAM torus / invariant manifold (dynamical systems)",
          "note": "Both are transport barriers that fluid parcels do not cross under laminar conditions"
        },
        {
          "field_a_term": "eddy-driven cross-gyre mixing (oceanography)",
          "field_b_term": "chaotic transport in resonance overlap region (dynamical systems)",
          "note": "Time-periodic perturbations destroy KAM tori via Chirikov resonance overlap criterion"
        },
        {
          "field_a_term": "Lagrangian coherent structure / FTLE ridge (oceanography)",
          "field_b_term": "finite-time stable/unstable manifold (dynamical systems)",
          "note": "FTLE ridges approximate stable manifolds of hyperbolic fixed points in finite time"
        }
      ],
      "references": [
        {
          "doi": "10.1016/j.physd.2000.10.004",
          "note": "Haller & Yuan (2000) — Lagrangian coherent structures and finite-time Lyapunov exponents"
        },
        {
          "doi": "10.1063/1.166188",
          "note": "Wiggins (2005) — the dynamical systems approach to Lagrangian ocean transport"
        },
        {
          "doi": "10.1017/S0022112005008682",
          "note": "Beron-Vera et al. (2008) — KAM tori and ocean gyre circulation"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/oceanography-dynamical-systems/b-ocean-gyres-hamiltonian-chaos-kam-tori.yaml"
    },
    {
      "id": "b-lcs-retention-zone-x-coastal-larval-supply",
      "title": "Finite-time Lyapunov exponent ridges (Lagrangian coherent structures) identify transient transport barriers and retention pockets near fronts and capes — quantities coastal ecology links to larval retention and settlement hotspots when biological mortality is weak relative to advection time scales.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Physical oceanography computes FTLE/LCS fields from velocity products to visualize where parcels remain coherent or escape along ridges; marine larval ecology hypothesizes that prolonged residence near coastlines raises local recruitment when larvae match habitat cues before offshore export. The bri",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ftle-ridge-threshold-correlates-larval-retention-proxy"
      ],
      "communication_gap": "Operational coastal forecasts emphasize Eulerian metrics (temperature, salinity fronts) while recruitment studies increasingly ingest circulation models — FTLE-based retention indices remain niche outside interdisciplinary larval-transport groups.\n",
      "translation_table": [
        {
          "field_a_term": "backward-time FTLE ridge (transport organizer)",
          "field_b_term": "larval retention probability envelope near coastline",
          "note": "Requires validating passive-float realism vs larval behavior."
        },
        {
          "field_a_term": "hyperbolic segments / stable/unstable manifold analogies (finite-time)",
          "field_b_term": "connectivity corridors vs recruitment sinks/sources",
          "note": "Shared language from dynamical systems maps onto ecological connectivity matrices only after calibration."
        },
        {
          "field_a_term": "tidal vs mesoscale forcing bandwidth in ψ(x,y,t)",
          "field_b_term": "spawning synchrony windows vs settlement-sensitive ontogeny",
          "note": "Multi-scale coupling often omitted in passive LCS-only narratives."
        }
      ],
      "references": [
        {
          "doi": "10.1016/j.physd.2005.10.007",
          "note": "Haller (2005) — definition and properties of Lagrangian coherent structures from hyperbolic trajectories (Physica D)."
        },
        {
          "doi": "10.3354/meps260083",
          "note": "Siegel et al. (2003) — Lagrangian descriptions of marine larval dispersion linking circulation structure to ecological transport questions (MEPS)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/oceanography-ecology/b-lcs-retention-zone-x-coastal-larval-supply.yaml"
    },
    {
      "id": "b-tidal-forcing-ocean-mixing",
      "title": "Tidal forcing generates internal waves at ocean ridges and seamounts that break and drive deep-ocean mixing, bridging physical oceanography and geophysics through the internal wave energy cascade that maintains the oceanic thermohaline circulation.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Barotropic tides generated by gravitational forcing (moon and sun) interact with bottom topography to radiate baroclinic internal tides that propagate along density surfaces; these waves break via parametric subharmonic instability and shear instability, depositing approximately 1 TW of mechanical e",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-tidal-mixing-overturning-circulation-control"
      ],
      "communication_gap": "Physical oceanographers measure mixing turbulence and circulation while geophysicists measure tidal dissipation and Earth deformation; the internal tide pathway connecting tidal forcing to ocean mixing was only clarified in the 1990s-2000s by Munk & Wunsch, and climate models still struggle to parameterize the spatially heterogeneous tidal mixing correctly.\n",
      "translation_table": [
        {
          "field_a_term": "barotropic tidal forcing (geophysics)",
          "field_b_term": "energy source for baroclinic internal tides (oceanography)",
          "note": "Bottom topography converts barotropic to baroclinic tidal energy at rate proportional to tidal velocity squared"
        },
        {
          "field_a_term": "internal wave dispersion relation omega^2 = N^2 sin^2(theta) + f^2 cos^2(theta)",
          "field_b_term": "stratification parameter N (Brunt-Vaisala frequency) in oceanography",
          "note": "The angle of internal wave propagation is set by the ratio of forcing frequency to N and f"
        },
        {
          "field_a_term": "tidal dissipation Q factor (geophysics)",
          "field_b_term": "abyssal mixing diffusivity kappa (oceanography)",
          "note": "The 2.1 TW tidal dissipation budget constrains available mixing energy for the overturning circulation"
        },
        {
          "field_a_term": "parametric subharmonic instability (fluid mechanics)",
          "field_b_term": "energy transfer from internal tides to inertial waves (oceanography)",
          "note": "PSI transfers energy from tidal frequency to near-inertial waves that more efficiently break and mix"
        }
      ],
      "references": [
        {
          "doi": "10.1016/S0967-0637(98)00070-8",
          "note": "Munk & Wunsch (1998) - abyssal recipes II; energetics of tidal and wind mixing"
        },
        {
          "doi": "10.1126/science.288.5473.1943",
          "note": "Egbert & Ray (2000) - significant dissipation of tidal energy in the deep ocean"
        },
        {
          "doi": "10.1017/S0022112003003823",
          "note": "St. Laurent & Garrett (2002) - role of internal tides in mixing the deep ocean"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/oceanography-geophysics/b-tidal-forcing-ocean-mixing.yaml"
    },
    {
      "id": "b-neural-spectral-model-x-submesoscale-forecasting",
      "title": "Neural spectral forecasting bridges operator-learning frequency dynamics and submesoscale ocean prediction pipelines.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy (to be empirically validated): Spectral neural surrogates can emulate energy-transfer dynamics across scales similarly to reduced spectral ocean models used for submesoscale forecast acceleration.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-neural-spectral-ocean-model-improves-submesoscale-forecast-skill"
      ],
      "communication_gap": "Domain operators prioritize interpretable reliability diagnostics, while ML work often prioritizes aggregate accuracy without deployment-grade uncertainty audits.",
      "translation_table": [
        {
          "field_a_term": "model prior",
          "field_b_term": "domain prior",
          "note": "Both constrain inference in data-sparse regimes."
        },
        {
          "field_a_term": "uncertainty estimate",
          "field_b_term": "risk-aware decision support",
          "note": "Uncertainty quality determines practical utility."
        },
        {
          "field_a_term": "out-of-distribution behavior",
          "field_b_term": "deployment robustness",
          "note": "Shift sensitivity governs real-world reliability."
        }
      ],
      "references": [
        {
          "arxiv": "2010.08895",
          "note": "Operator-learning basis for spectral surrogate forecasting."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/oceanography-machine-learning/b-neural-spectral-model-x-submesoscale-forecasting.yaml"
    },
    {
      "id": "b-ocean-acoustic-tomography-x-ultrasound-transmission-tomography",
      "title": "Ocean acoustic tomography infers large-scale internal temperature/salinity structure from acoustic travel times (and related observables) between widely separated sources and receivers — medical ultrasound computed tomography similarly reconstructs tissue acoustic parameters from projection-like measurements — both solve ill-posed inverse scattering problems with regularization and resolution limits governed by aperture and noise.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Munk–Wunsch-style ocean tomography framed basin-scale warming signals using acoustic observables sensitive to sound-speed integrals along rays — ultrasound CT / transmission tomography reconstructs spatial maps of attenuation and sound speed from sets of through-transmission paths — shared toolkit i",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-adjoint-base-resolution-operator-matches-ray-density-despite-scale-gap"
      ],
      "communication_gap": "Oceanographers publish in JASA / Oceanography while clinical acoustics targets IEEE TUFFC / Radiology — inverse-problem algorithms transfer slowly despite identical Bayesian formulations.\n",
      "translation_table": [
        {
          "field_a_term": "Basin-scale acoustic pulse travel times / modal dispersion",
          "field_b_term": "Ultrasound projection times / spectral shifts through tissue slabs",
          "note": "Forward models share Hamiltonian ray equations with tissue heterogeneity replacing mesoscale eddies."
        },
        {
          "field_a_term": "Mesoscale ocean variability as tomographic noise / signal",
          "field_b_term": "Patient motion / perfusion variability as ultrasound CT nuisance parameters",
          "note": "Both inverse problems fight structured nonstationary clutter."
        },
        {
          "field_a_term": "Mooring / autonomous platform aperture geometry",
          "field_b_term": "Ring-array or rotating transducer aperture geometry",
          "note": "Sampling geometry determines resolution tensor ellipsoid shape."
        }
      ],
      "references": [
        {
          "doi": "10.1016/0198-0149(79)90003-1",
          "note": "Munk & Wunsch (1979) — ocean acoustic tomography: a scheme for large-scale monitoring"
        },
        {
          "doi": "10.1016/0161-7346(86)90006-6",
          "note": "Greenleaf et al. (1986) — ultrasonic computed tomography / acoustic imaging reconstruction paradigms"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/oceanography-medicine/b-ocean-acoustic-tomography-x-ultrasound-transmission-tomography.yaml"
    },
    {
      "id": "b-ocean-color-radiative-transfer",
      "title": "The apparent color of the ocean surface as measured by satellite remote sensing is determined by the radiative transfer equation governing light propagation through a scattering and absorbing medium: the same mathematical framework (the scalar or vector radiative transfer equation with Mie-theory phase functions) that optical physicists use to model light in clouds, aerosols, and turbid media applies directly to ocean optics",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Ocean color remote sensing solves the inverse problem of the radiative transfer equation (RTE): the water-leaving radiance L_w(lambda) measured at the top of atmosphere is related to inherent optical properties (absorption a(lambda) and backscattering b_b(lambda)) by the Gordon et al. (1988) model R",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ocean-color-chlorophyll-inversion-accuracy"
      ],
      "communication_gap": "Oceanographers develop empirical band-ratio chlorophyll algorithms calibrated to in-situ data while optical physicists work with rigorous radiative transfer theory; the gap is closing with the bio-optical modelling community but operationally validated physics-based retrievals remain computationally expensive.",
      "translation_table": [
        {
          "field_a_term": "water-leaving radiance L_w(lambda) (oceanography)",
          "field_b_term": "diffuse reflectance from a scattering medium (optics)",
          "note": "L_w is the upwelling light exiting the ocean surface; it encodes the depth-integrated absorption and scattering"
        },
        {
          "field_a_term": "absorption coefficient a(lambda) of seawater + CDOM + phytoplankton (oceanography)",
          "field_b_term": "absorption cross-section in Beer-Lambert-Bouguer law (optics)",
          "note": "Both use the same exponential attenuation framework; total a is the sum of water, CDOM, and particle contributions"
        },
        {
          "field_a_term": "backscattering coefficient b_b(lambda) of particles (oceanography)",
          "field_b_term": "scattering phase function and asymmetry parameter g (optics)",
          "note": "Mie theory predicts b_b from particle size distribution; g determines forward vs. back scatter ratio"
        },
        {
          "field_a_term": "chlorophyll-a concentration [Chl] retrieval algorithm (oceanography)",
          "field_b_term": "inversion of radiative transfer equation for composition (optics)",
          "note": "OC3/OC4 band-ratio algorithms are empirical approximations to the full RTE inverse problem"
        }
      ],
      "references": [
        {
          "doi": "10.1029/JC093iC09p10909",
          "note": "Gordon et al. (1988) - a semianalytic radiance model of ocean color (R_rs bio-optical model)"
        },
        {
          "doi": "10.1029/2001GB001428",
          "note": "O'Reilly et al. (1998) - ocean color chlorophyll algorithms for SeaWiFS (OC4 algorithm)"
        },
        {
          "doi": "10.1364/AO.24.003591",
          "note": "Morel & Prieur (1977) - analysis of variations in ocean color (foundational inherent optical properties)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/oceanography-optics/b-ocean-color-radiative-transfer.yaml"
    },
    {
      "id": "b-ribosome-kinetics-queuing-theory",
      "title": "Ribosome translation kinetics on mRNA is a totally asymmetric simple exclusion process (TASEP): a driven lattice gas equivalent to a 1D queuing system with site exclusion",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The totally asymmetric simple exclusion process (TASEP) models ribosomes moving along mRNA: each ribosome occupies ℓ codons, enters at the 5' end at rate α (initiation), hops forward at rate β(i) (translation elongation at codon i), and exits at the 3' end at rate γ (termination). In the homogeneous",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ribosome-kinetics-queuing-theory"
      ],
      "communication_gap": "Molecular biologists who study translation efficiency use codon adaptation indices and ribosome profiling data without typically knowing TASEP theory or queuing network mathematics. Operations researchers who study tandem queues are unaware that their models have exact solutions applicable to gene expression. The connection was made by physicists (Shaw et al. 2003) but remains siloed in biophysics journals.\n",
      "translation_table": [
        {
          "field_a_term": "ribosome initiation rate α (biology)",
          "field_b_term": "customer arrival rate λ in queuing theory",
          "note": "5'UTR structure and Kozak sequence strength determine α — analogous to queue arrival intensity"
        },
        {
          "field_a_term": "codon translation rate β(i) (codon-specific)",
          "field_b_term": "service rate μᵢ at server i in a tandem queue",
          "note": "Rare codons with low β slow the entire ribosome queue — analogous to bottleneck server"
        },
        {
          "field_a_term": "ribosome density on mRNA ρ(x)",
          "field_b_term": "occupancy profile along the tandem queue",
          "note": "Dense pileup upstream of slow codons = queue buildup upstream of slow server"
        },
        {
          "field_a_term": "protein production rate J",
          "field_b_term": "throughput of the queuing network",
          "note": "TASEP current J = ρ(1-ρ)·β_min at the bottleneck codon"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevE.68.021910",
          "note": "Shaw et al. (2003) Totally asymmetric exclusion process with extended objects — a model for protein synthesis. Phys Rev E 68:021910"
        },
        {
          "doi": "10.1126/science.1168978",
          "note": "Ingolia et al. (2009) Genome-wide analysis in vivo of translation with nucleotide resolution using ribosome profiling. Science 324:218"
        },
        {
          "doi": "10.1007/BF01007938",
          "note": "MacDonald et al. (1968) Kinetics of biopolymerization on nucleic acid templates. Biopolymers 6:1"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/operations-research-complex-systems/b-ribosome-kinetics-queuing-theory.yaml"
    },
    {
      "id": "b-supply-chain-network-robustness",
      "title": "Supply chain resilience is determined by network topology in the same way as infrastructure robustness in complex systems theory, with the Barabasi-Albert scale-free network model predicting that targeted hub disruption causes cascading failures while random disruption is absorbed.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Supply chain networks mapped as directed graphs (nodes = firms, edges = supplier-buyer relationships) exhibit scale-free degree distributions with a small number of high-degree hub suppliers; Barabasi-Albert robustness theory predicts these networks are robust to random firm failures (probability in",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-supply-chain-percolation-threshold-dual-sourcing"
      ],
      "communication_gap": "Operations researchers optimize supply chains using scheduling and inventory models while network scientists develop robustness theory abstractly; firm-level supply chain network data became available only with recent advances in economic tracking, and the quantitative connection to Barabasi-Albert robustness is still not widely used in supply chain management practice.\n",
      "translation_table": [
        {
          "field_a_term": "critical supplier / bottleneck (operations research)",
          "field_b_term": "hub node in scale-free network (complex systems)",
          "note": "High-degree supplier nodes are simultaneously most valuable and most disruptive when removed"
        },
        {
          "field_a_term": "supply chain disruption propagation (operations research)",
          "field_b_term": "cascade failure / percolation threshold (complex systems)",
          "note": "Firm failure propagates downstream when inventory buffers are exhausted; maps to percolation on networks"
        },
        {
          "field_a_term": "safety stock / redundancy (operations research)",
          "field_b_term": "network redundancy / path multiplicity (complex systems)",
          "note": "Dual sourcing increases effective network connectivity, raising the percolation threshold"
        },
        {
          "field_a_term": "just-in-time inventory (operations research)",
          "field_b_term": "reducing edge weight / path redundancy (complex systems)",
          "note": "JIT reduces system robustness by minimizing buffering exactly as reducing edge weights lowers robustness"
        }
      ],
      "references": [
        {
          "doi": "10.1038/35019019",
          "note": "Albert, Jeong & Barabasi (2000) - error and attack tolerance of complex networks"
        },
        {
          "doi": "10.1126/science.1257856",
          "note": "Acemoglu et al. (2012) - network origins of aggregate fluctuations (supply network cascades)"
        },
        {
          "doi": "10.1038/s41586-020-2649-2",
          "note": "Carvalho et al. (2021) - supply chain disruptions from COVID-19 as network cascade"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/operations-research-complex-systems/b-supply-chain-network-robustness.yaml"
    },
    {
      "id": "b-multi-armed-bandits-x-sepsis-antibiotic-de-escalation",
      "title": "Constrained multi-armed bandits can transfer from sequential decision theory to sepsis antibiotic de-escalation policy.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Constrained multi-armed bandits can transfer from sequential decision theory to sepsis antibiotic de-escalation policy.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-constrained-bandit-policies-reduce-sepsis-antibiotic-overtreatment-days"
      ],
      "communication_gap": "Domain-specific vocabularies and benchmark conventions obscure transferable mathematical structure.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1214/aoms/1177731118",
          "note": "Sequential test optimality reference linked to adaptive decision timing."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/operations-research-infectious-disease/b-multi-armed-bandits-x-sepsis-antibiotic-de-escalation.yaml"
    },
    {
      "id": "b-heavy-traffic-queueing-x-emergency-department-flow",
      "title": "Heavy-traffic queueing limits provide transferable control laws for emergency-department flow stabilization.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Heavy-traffic queueing limits provide transferable control laws for emergency-department flow stabilization.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-diffusion-queueing-threshold-policies-reduce-ed-boarding-time-variance"
      ],
      "communication_gap": "The two communities use different notation, benchmarks, and publication venues, which obscures transferable mathematical structure.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1287/opre.29.3.567",
          "note": "Heavy-traffic queueing approximations."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/operations-research-medicine/b-heavy-traffic-queueing-x-emergency-department-flow.yaml"
    },
    {
      "id": "b-chromatic-aberration-dispersion",
      "title": "Chromatic aberration in optical systems is a direct consequence of the wavelength-dependent dispersion relation n(ω) of optical media, described by the Sellmeier equation; correcting it requires engineering material combinations whose dispersion curves produce an achromatic doublet satisfying the thin-lens condition Σ(φ_i/V_i) = 0 where V_i is the Abbe number",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Chromatic aberration arises because the refractive index n(ω) follows the Sellmeier dispersion relation n^2(ω) = 1 + Σ B_i*ω_i^2/(ω_i^2 - ω^2), so different wavelengths focus at different distances (longitudinal) or with different magnifications (lateral); the Abbe number V = (n_d - 1)/(n_F - n_C) c",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Optical engineers design aberration-corrected lenses using ray tracing and Zernike polynomials while wave physicists study dispersion in guided modes and pulse propagation; the identical mathematical structure of dispersion compensation is taught separately in lens design and fiber optics courses.",
      "translation_table": [
        {
          "field_a_term": "chromatic aberration focal length variation Δf (optics)",
          "field_b_term": "group velocity dispersion coefficient β_2 = d^2k/dω^2 (wave physics)",
          "note": "Longitudinal chromatic aberration Δf ∝ f/V; β_2 characterizes pulse spreading in the same mathematical framework"
        },
        {
          "field_a_term": "Abbe number V = (n_d-1)/(n_F-n_C) (optics)",
          "field_b_term": "inverse of the group-velocity dispersion strength (wave physics)",
          "note": "Low Abbe number = high dispersion = large β_2 analog; crown glass (V~60) vs flint glass (V~30)"
        },
        {
          "field_a_term": "achromatic doublet design condition (optics)",
          "field_b_term": "dispersion-managed fiber balancing normal and anomalous GVD (wave physics)",
          "note": "Both minimize wavelength-dependent phase error by combining materials with opposite dispersion slopes"
        },
        {
          "field_a_term": "apochromat correction at three wavelengths (optics)",
          "field_b_term": "higher-order dispersion compensation in ultrashort pulse optics (wave physics)",
          "note": "Apochromats minimize third-order aberration; dispersion-compensating mirrors address TOD in ultrafast lasers"
        }
      ],
      "references": [
        {
          "doi": "10.1364/JOSAA.9.000471",
          "note": "Smith (1992) - modern optical engineering and achromatic lens design"
        },
        {
          "doi": "10.1126/science.291.5510.1904",
          "note": "Agrawal (2001) - nonlinear fiber optics and dispersion management"
        },
        {
          "doi": "10.1038/nnano.2016.35",
          "note": "Khorasaninejad et al. (2016) - achromatic metalenses eliminating chromatic aberration in metasurfaces"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/optics-physics/b-chromatic-aberration-dispersion.yaml"
    },
    {
      "id": "b-drug-resistance-fitness-landscapes",
      "title": "Drug resistance evolution follows paths on fitness landscapes, with the accessibility of multi-drug resistance determined by the ruggedness and sign epistasis of the landscape, connecting pharmacology to evolutionary biology through the geometry of sequence space.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The set of all possible resistance mutations forms a fitness landscape in sequence space; empirical fitness landscapes for beta-lactamase (TEM-1) and HIV protease show rugged landscapes with sign epistasis—where the benefit of a mutation depends on background genotype—predicting that multi-drug resi",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-collateral-sensitivity-cycling-drug-resistance"
      ],
      "communication_gap": "Pharmacologists study drug resistance through MIC measurements and clinical outcomes while evolutionary biologists develop fitness landscape theory; the quantitative mapping between pharmacological parameters (MIC fold-change, cross-resistance profiles) and landscape geometry (epistasis, ruggedness) is still not routinely used in clinical drug resistance management.\n",
      "translation_table": [
        {
          "field_a_term": "drug resistance mutation (pharmacology)",
          "field_b_term": "fitness-increasing step in sequence space (evolutionary biology)",
          "note": "Each resistance mutation moves the pathogen to a higher-fitness genotype relative to the drug environment"
        },
        {
          "field_a_term": "minimum inhibitory concentration MIC (pharmacology)",
          "field_b_term": "fitness value at a point in the landscape (evolutionary biology)",
          "note": "Inverse MIC is a proxy for fitness in the drug environment; landscape ruggedness determines resistance accessibility"
        },
        {
          "field_a_term": "collateral sensitivity (pharmacology)",
          "field_b_term": "sign epistasis creating fitness valleys between resistance genotypes",
          "note": "A mutation conferring resistance to drug A reduces fitness in drug B; exploitable for combination therapy"
        },
        {
          "field_a_term": "multi-drug resistance (pharmacology)",
          "field_b_term": "accessible path on the fitness landscape through sign-epistatic terrain",
          "note": "Resistance to multiple drugs requires a sequence of accessible (uphill) mutational steps"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1173032",
          "note": "Weinreich et al. (2006) - Darwinian evolution can follow only very few mutational paths to fitter proteins"
        },
        {
          "doi": "10.1371/journal.pbio.1002184",
          "note": "Munck et al. (2015) - prediction and validation of collateral sensitivity in drug resistant bacteria"
        },
        {
          "doi": "10.1038/s41559-019-0800-6",
          "note": "Nichol et al. (2019) - bacterial fitness landscapes mapped by multiple antibiotic combinations"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/pharmacology-evolutionary-biology/b-drug-resistance-fitness-landscapes.yaml"
    },
    {
      "id": "b-neural-ode-x-pharmacokinetic-state-space-modeling",
      "title": "Neural ODE parameterization bridges continuous-depth learning and pharmacokinetic state-space modeling for sparse therapeutic-drug monitoring.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy (to be empirically validated): continuous-time latent dynamics learned by neural ordinary differential equations can serve as constrained surrogates for compartmental PK models when sampling is sparse and patient-specific variability is high.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-neural-ode-priors-improve-pk-state-forecasting"
      ],
      "communication_gap": "Pharmacometrics emphasizes mechanistic compartment structure, while ML emphasizes function approximation; shared validation benchmarks are still limited.",
      "translation_table": [
        {
          "field_a_term": "neural ODE latent flow",
          "field_b_term": "drug concentration state trajectory",
          "note": "Both are continuous-time trajectories inferred from irregular observations."
        },
        {
          "field_a_term": "adjoint sensitivity",
          "field_b_term": "PK parameter identifiability",
          "note": "Gradient pathways map to sensitivity of concentration predictions to kinetic parameters."
        },
        {
          "field_a_term": "latent state regularization",
          "field_b_term": "physiologic plausibility constraints",
          "note": "Regularizers enforce nonnegative concentrations and realistic clearance scales."
        }
      ],
      "references": [
        {
          "arxiv": "1806.07366",
          "note": "Neural Ordinary Differential Equations."
        },
        {
          "url": "https://www.fda.gov/drugs/science-and-research-drugs/model-informed-drug-development-program",
          "note": "FDA overview of model-informed drug development context."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/pharmacology-machine-learning/b-neural-ode-x-pharmacokinetic-state-space-modeling.yaml"
    },
    {
      "id": "b-pharmacokinetics-compartmental-ode",
      "title": "Pharmacokinetics is applied ODE compartmental modeling: drug concentration-time profiles in plasma, tissue, and urine follow C(t) = Σ A_i*exp(-λ_i*t) whose eigenvalues {λ_i} are the roots of the characteristic polynomial of the transfer matrix K, with pharmacokinetic parameters (clearance CL = k_10*V_c, distribution volume V_d) directly mapping to compartment rate constants",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A two-compartment pharmacokinetic model is a system of linear ODEs: dC_c/dt = -(k_10 + k_12)*C_c + k_21*C_p and dC_p/dt = k_12*C_c - k_21*C_p, whose solution after IV bolus is C_c(t) = A*exp(-αt) + B*exp(-βt), where α and β are eigenvalues of the matrix K = [[k_10+k_12, k_21],[k_12, k_21]]; clearanc",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Pharmacologists characterize drug behavior empirically through plasma sampling studies while applied mathematicians study compartmental models as ODEs; the connection between pharmacokinetic parameter estimation (nonlinear curve fitting to multiexponential data) and eigenvalue analysis of the rate matrix is rarely made explicit in clinical pharmacology training.",
      "translation_table": [
        {
          "field_a_term": "pharmacokinetic two-compartment model (pharmacology)",
          "field_b_term": "2×2 linear ODE system with first-order transfer rate matrix K (mathematics)",
          "note": "Central (plasma) and peripheral (tissue) compartments with rate constants k_12, k_21, k_10 (elimination)"
        },
        {
          "field_a_term": "drug clearance CL (pharmacology)",
          "field_b_term": "elimination rate constant k_10 multiplied by volume of central compartment (mathematics)",
          "note": "CL = k_10*V_c; represents proportional removal rate; relates to AUC by CL = Dose/AUC"
        },
        {
          "field_a_term": "distribution volume V_d (pharmacology)",
          "field_b_term": "ratio of total drug amount to plasma concentration at steady state (mathematics)",
          "note": "V_d = V_c + V_p*(k_12/k_21); apparent volume not a physical compartment volume"
        },
        {
          "field_a_term": "nonlinear pharmacokinetics (Michaelis-Menten saturation) (pharmacology)",
          "field_b_term": "nonlinear ODE with saturable elimination: dC/dt = -V_max*C/(K_m + C) (mathematics)",
          "note": "High-dose drugs (phenytoin, ethanol) show zero-order kinetics when C >> K_m; transition from linear to nonlinear predicted by Michaelis-Menten ODE"
        }
      ],
      "references": [
        {
          "doi": "10.1002/jps.2600620112",
          "note": "Rowland & Tozer (1989) - Clinical Pharmacokinetics: compartmental analysis"
        },
        {
          "doi": "10.1124/jpet.106.106443",
          "note": "Benet & Zia-Amirhosseini (1995) - basic principles of pharmacokinetics"
        },
        {
          "doi": "10.1007/978-3-319-04426-4",
          "note": "Gabrielsson & Weiner (2006) - Pharmacokinetic and Pharmacodynamic Data Analysis"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/pharmacology-mathematics/b-pharmacokinetics-compartmental-ode.yaml"
    },
    {
      "id": "b-antibiotic-synergy-pharmacodynamic-surfaces",
      "title": "Antibiotic combination synergy is a pharmacodynamic interaction surface: Loewe additivity and Bliss independence define the null model separating true synergy from additivity",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The effect of two antibiotics A and B at concentrations (a,b) defines a 3D pharmacodynamic response surface E(a,b) over the concentration plane. Loewe additivity provides the null interaction model: if (a/IC50_A)+(b/IC50_B)=1 produces effect E₀, then the combination is additive. Synergy occurs when ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-antibiotic-synergy-pharmacodynamic-surfaces"
      ],
      "communication_gap": "Microbiologists and pharmacologists measure synergy using the scalar FICI from a single checkerboard cross, missing the full 2D surface structure. Mathematical models of interaction surfaces (response surface analysis, tensor decomposition) are standard in quantitative pharmacology but rarely applied in antimicrobial drug development, where clinical endpoints dominate over mechanistic surface mapping.\n",
      "translation_table": [
        {
          "field_a_term": "MIC (minimum inhibitory concentration) ratio for drug pair",
          "field_b_term": "fractional inhibitory concentration index (FICI) on the isobologram",
          "note": "FICI < 0.5 = synergy; 0.5-4 = indifference; > 4 = antagonism"
        },
        {
          "field_a_term": "drug target (ribosome, cell wall, DNA gyrase)",
          "field_b_term": "node in the essential gene interaction network",
          "note": "Drugs targeting synthetic lethal gene pairs yield synergy regardless of mechanism"
        },
        {
          "field_a_term": "checkerboard assay growth matrix",
          "field_b_term": "sampled pharmacodynamic response surface E(a,b)",
          "note": "Matrix rows/columns are 2-fold dilution series of each drug"
        },
        {
          "field_a_term": "Bliss independence model",
          "field_b_term": "probabilistic null model E = 1-(1-E_A)(1-E_B)",
          "note": "Bliss is the correct null when drug effects are probabilistically independent"
        }
      ],
      "references": [
        {
          "doi": "10.1128/AAC.01625-11",
          "note": "Greco et al. (1995) The search for synergy — a critical review from a response surface perspective. Pharmacol Rev 47:331"
        },
        {
          "doi": "10.1038/s41564-019-0423-z",
          "note": "Wood et al. (2019) Mechanism-based epistasis and synergy surfaces. Nat Microbiol 4:491"
        },
        {
          "doi": "10.1016/j.cell.2014.01.002",
          "note": "Chandrasekaran et al. (2016) Chemogenomics and orthology-based design of antibiotic combination therapies. Mol Syst Biol 12:872"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/pharmacology-systems-biology/b-antibiotic-synergy-pharmacodynamic-surfaces.yaml"
    },
    {
      "id": "b-kolmogorov-complexity-explanation",
      "title": "The best scientific theory is the shortest program that computes the observed data — Kolmogorov complexity K(x) formalises Occam's razor as data compression, making scientific explanation equivalent to finding the minimum description length (MDL) model, and overfitting identical to using a description that is longer than necessary.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Kolmogorov (1965) defined the complexity K(x) of a string x as the length (in bits) of the shortest program on a universal Turing machine U that outputs x and halts. Solomonoff (1964) independently defined algorithmic probability m(x) = Σ_{p:U(p)=x} 2^{-|p|} as the probability that a random program ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-mdl-scientific-theory-selection"
      ],
      "communication_gap": "Kolmogorov (1965) published in a Russian mathematics journal. Solomonoff (1964) published in an obscure information theory journal. Rissanen (1978) published in Automatica (control theory). The three approaches were unified by Li & Vitányi (1997) in a book that remains specialised computer science literature. Philosophers of science are largely unaware of the formal MDL framework; statisticians who use AIC/BIC daily are unaware of the Kolmogorov complexity foundation; machine learning researchers who use regularisation and Bayesian methods are unaware that they are implementing approximations to Solomonoff induction.\n",
      "translation_table": [
        {
          "field_a_term": "Kolmogorov complexity K(x)",
          "field_b_term": "Length of shortest scientific theory that generates data x",
          "note": "Not computable, but approximable by compression algorithms (Lempel-Ziv, gzip, etc.)"
        },
        {
          "field_a_term": "Solomonoff prior m(x)",
          "field_b_term": "Universal Bayesian prior — assigns higher probability to simpler data",
          "note": "Dominating prior: any computable prior is bounded above by m(x) up to a constant"
        },
        {
          "field_a_term": "MDL model selection",
          "field_b_term": "Optimal scientific theory selection — minimise total description length",
          "note": "Equivalent to Bayesian model selection with a uniform prior over program lengths"
        },
        {
          "field_a_term": "Overfitting",
          "field_b_term": "Using a description longer than necessary — the model encodes noise",
          "note": "Overfitting ↔ K(model) + K(data|model) > K(data); regularisation ↔ MDL penalty on K(model)"
        },
        {
          "field_a_term": "Physical law (short program)",
          "field_b_term": "Scientific explanation — massive data compression",
          "note": "Newton's laws: ~100 bytes of code generate centuries of planetary motion; compression ratio ~10^{15}"
        },
        {
          "field_a_term": "Undecidability of K(x)",
          "field_b_term": "Science is an incomputable optimisation — no algorithm finds the best theory",
          "note": "NML (normalised maximum likelihood) and Bayesian compression are tractable approximations"
        },
        {
          "field_a_term": "Algorithmic mutual information I(T:D) = K(D) - K(D|T)",
          "field_b_term": "Amount of scientific explanation provided by theory T for data D",
          "note": "A theory that doesn't compress the data explains nothing; I(T:D) = 0 for random noise"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF01068057",
          "note": "Kolmogorov (1965) Probl Inf Transm 1:3 — three approaches to the quantitative definition of information"
        },
        {
          "doi": "10.1016/S0019-9958(64)90223-2",
          "note": "Solomonoff (1964) Inf Control 7:224 — formal theory of inductive inference; algorithmic probability"
        },
        {
          "doi": "10.1016/0005-1098(78)90005-5",
          "note": "Rissanen (1978) Automatica 14:465 — modeling by the shortest data description; MDL principle"
        },
        {
          "note": "Li & Vitányi (2008) An Introduction to Kolmogorov Complexity and Its Applications, 3rd ed. (Springer)",
          "url": "https://link.springer.com/book/10.1007/978-0-387-49820-1"
        },
        {
          "doi": "10.1145/584091.584093",
          "note": "Cilibrasi & Vitányi (2005) IEEE Trans Inf Theory — clustering by compression; NCD"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/philosophy-of-science-information/b-kolmogorov-complexity-explanation.yaml"
    },
    {
      "id": "b-bayesian-scientific-inference",
      "title": "Scientific inference is Bayesian belief updating: Bayes' theorem formalises induction, Occam's razor emerges as automatic model complexity penalty, and the Duhem-Quine problem maps to Bayesian model comparison — unifying philosophy of science with probability theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The central problem of philosophy of science — how does evidence confirm or disconfirm hypotheses? — is solved in quantitative form by Bayes' theorem:\n\n  P(H | E) = P(E | H) · P(H) / P(E)\n\nBayesian confirmation theory (Howson & Urbach 1989) shows that this equation captures the entire logic of empir",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-bayes-factor-theory-selection",
        "h-preregistration-field-replication-rate"
      ],
      "communication_gap": "Philosophers of science study confirmation theory in journals like British Journal for the Philosophy of Science and Philosophy of Science. Statisticians develop Bayesian methods in JASA, Biometrika, and Bayesian Analysis. The explicit identification of Bayesian confirmation theory with the epistemology of science was made by Howson & Urbach (1989) and MacKay (2003), but is still not standard in either field's training. Most working scientists are taught null-hypothesis significance testing (NHST), not Bayesian inference, creating a systematic disconnect between the normative theory of scientific inference and scientific practice.\n",
      "translation_table": [
        {
          "field_a_term": "Prior probability P(H) (philosophy of science)",
          "field_b_term": "Prior distribution over hypothesis space (Bayesian statistics)",
          "note": "Encodes background theoretical commitments and plausibility"
        },
        {
          "field_a_term": "Predictive power of hypothesis",
          "field_b_term": "Likelihood P(E | H)",
          "note": "A hypothesis is powerful if it strongly predicts what was observed"
        },
        {
          "field_a_term": "Rational degree of belief after evidence",
          "field_b_term": "Posterior probability P(H | E)",
          "note": "The output of Bayesian updating IS rational scientific credence"
        },
        {
          "field_a_term": "Occam's razor (prefer simpler hypotheses)",
          "field_b_term": "Bayesian Occam factor (automatic complexity penalty in marginal likelihood)",
          "note": "Occam's razor is a theorem of probability theory, not a separate principle"
        },
        {
          "field_a_term": "Bayes factor B_{01} = P(E|H_0) / P(E|H_1)",
          "field_b_term": "Evidence ratio between competing scientific theories",
          "note": "Kass & Raftery (1995) calibration: B > 150 = very strong evidence"
        },
        {
          "field_a_term": "Duhem-Quine problem (which auxiliary to reject)",
          "field_b_term": "Bayesian posterior over (H_core, H_aux) joint hypothesis space",
          "note": "Probability theory distributes disconfirmation rationally across candidates"
        },
        {
          "field_a_term": "Scientific consensus",
          "field_b_term": "Community posterior after all published evidence",
          "note": "Formal definition of scientific consensus in Bayesian terms"
        },
        {
          "field_a_term": "Theory change / paradigm shift",
          "field_b_term": "Large Bayes factor + prior update shifting posterior mass",
          "note": "Kuhnian revolutions = sudden accumulation of decisive Bayes factors"
        }
      ],
      "references": [
        {
          "note": "Jeffreys (1939) Theory of Probability (Oxford University Press) — first systematic Bayesian framework for scientific inference"
        },
        {
          "note": "Howson & Urbach (1989) Scientific Reasoning: The Bayesian Approach (Open Court) — Bayesian confirmation theory"
        },
        {
          "note": "MacKay (2003) Information Theory, Inference, and Learning Algorithms (Cambridge) — Occam factor as theorem"
        },
        {
          "doi": "10.1080/01621459.1995.10476572",
          "note": "Kass & Raftery (1995) JASA 90:773 — Bayes factors and their interpretation scale"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/philosophy-of-science-mathematics/b-bayesian-scientific-inference.yaml"
    },
    {
      "id": "b-bayesian-inference-scientific-confirmation",
      "title": "The Bayesian account of scientific confirmation — evidence E confirms hypothesis H iff P(H|E) > P(H) — provides a quantitative, principled replacement for Popperian falsificationism, resolves Hempel's raven paradox, and explains why Bayesian model comparison via marginal likelihood automatically implements Occam's razor against overfitted hypotheses.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The core Bayesian account of confirmation: evidence E confirms hypothesis H if P(H|E) > P(H), i.e., if observing E raises our credence in H. By Bayes' theorem: P(H|E) = P(E|H)·P(H) / P(E). The likelihood ratio P(E|H)/P(E|¬H) > 1 is the strength of confirmation (Bayes factor). This is a quantitative ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-bayesian-marginal-likelihood-occam-razor-automatic"
      ],
      "communication_gap": "Philosophy of science and statistics have parallel traditions that rarely intersect. Howson & Urbach (2006) made the Bayesian-confirmation connection explicit, but philosophy journals (British Journal for the Philosophy of Science, Philosophy of Science) and statistics journals (Annals of Statistics, Bayesian Analysis) have non-overlapping readerships. Philosophers cite Glymour, Hempel, Popper; statisticians cite Jeffreys, Jaynes, Lindley. The mathematical equivalence of their accounts of \"evidence\" is rarely acknowledged across the disciplinary boundary.\n",
      "translation_table": [
        {
          "field_a_term": "scientific confirmation of hypothesis H by evidence E",
          "field_b_term": "Bayesian posterior update P(H|E) > P(H)",
          "note": "Degree of confirmation = log Bayes factor log[P(E|H)/P(E|¬H)]"
        },
        {
          "field_a_term": "Popper's falsification (H refuted by E)",
          "field_b_term": "Bayesian update with P(E|H) = 0 → P(H|E) = 0",
          "note": "Falsification is a degenerate Bayesian case, not a separate logic"
        },
        {
          "field_a_term": "Occam's razor (prefer simpler hypotheses)",
          "field_b_term": "marginal likelihood (Bayesian evidence) P(E|M) penalizes complexity",
          "note": "Complex models integrate over more parameter space → lower P(E|M) if extra complexity is unused"
        },
        {
          "field_a_term": "scientific prior probability P(H)",
          "field_b_term": "prior in Bayesian inference (subjective or reference prior)",
          "note": "The objectivity debate: Jeffreys' reference priors minimize information about θ"
        },
        {
          "field_a_term": "Jeffreys-Lindley paradox (p-value vs. Bayes factor)",
          "field_b_term": "different questions: p(E|H₀) vs. P(H₀|E)/P(H₁|E)",
          "note": "Frequentist p-value and Bayes factor are not measuring the same thing"
        },
        {
          "field_a_term": "old evidence problem (Glymour 1980)",
          "field_b_term": "conditioning on known data: P(H|E)=P(H) when E already in sample space",
          "note": "Resolved by counterfactual Bayesianism or using Bayes factor as measure"
        }
      ],
      "references": [
        {
          "note": "Howson & Urbach (2006) Scientific Reasoning: The Bayesian Approach. 3rd ed. Open Court."
        },
        {
          "note": "Jaynes, E.T. (2003) Probability Theory: The Logic of Science. Cambridge University Press."
        },
        {
          "note": "Jeffreys, H. (1961) Theory of Probability. 3rd ed. Oxford University Press."
        },
        {
          "note": "Glymour, C. (1980) Theory and Evidence. Princeton University Press."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/philosophy-of-science-statistics/b-bayesian-inference-scientific-confirmation.yaml"
    },
    {
      "id": "b-induction-bayesian-convergence",
      "title": "Hume's problem of induction — no finite evidence can logically prove a universal law — is dissolved by Bayesian convergence theorems showing that posterior beliefs converge to truth with probability 1 as evidence accumulates (Doob 1949), while Popperian falsificationism corresponds to the degenerate case of zero prior that Bayesian theory proves leads to incoherence.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Hume (1748, Enquiry Concerning Human Understanding, Section IV) argued that the inference \"the sun will rise tomorrow because it always has\" is logically circular — we cannot justify inductive inference by induction itself. No finite sequence of observations can entail a universal law; any proposed ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-doob-convergence-rate-scientific-inference"
      ],
      "communication_gap": "Philosophers of science and statisticians publish in entirely separate literatures. Philosophers cite Hume, Popper, Lakatos; statisticians cite Bayes, Laplace, Fisher, Neyman-Pearson. Bayesian statisticians are aware of the philosophical implications of Doob's theorem, but the result is rarely discussed in philosophy of science journals. Popper's falsificationism remains highly influential in scientific culture (the \"falsifiability\" criterion for science) even as Bayesian statistics has become dominant in practice. The reconciliation remains primarily in the philosophy of probability literature (Howson & Urbach, de Finetti, Savage) which is unfamiliar to most working scientists.\n",
      "translation_table": [
        {
          "field_a_term": "universal law (Hume's target of induction)",
          "field_b_term": "point hypothesis H: θ = θ₀ (statistics)"
        },
        {
          "field_a_term": "finite observations (Hume's evidence)",
          "field_b_term": "data D_n of n observations (Bayesian update)"
        },
        {
          "field_a_term": "problem of induction (cannot prove universals from particulars)",
          "field_b_term": "consistent posterior (converges to truth with probability 1)"
        },
        {
          "field_a_term": "Cromwell's rule (do not assign p=0 to live hypotheses)",
          "field_b_term": "Hume's acknowledgement that we cannot rule out alternatives a priori"
        },
        {
          "field_a_term": "Popperian falsification (theory is corroborated until falsified)",
          "field_b_term": "degenerate 0/1 prior (Bayesian incoherence / Dutch book)"
        },
        {
          "field_a_term": "Bayes factor K (ratio of marginal likelihoods)",
          "field_b_term": "continuous corroboration measure replacing binary falsification"
        }
      ],
      "references": [
        {
          "url": "https://www.gutenberg.org/ebooks/9662",
          "note": "Hume (1748) An Enquiry Concerning Human Understanding — problem of induction (Section IV)"
        },
        {
          "url": "https://www.cambridge.org/gb/universitypress/subjects/philosophy/philosophy-science/logic-scientific-discovery",
          "note": "Popper (1959) The Logic of Scientific Discovery — falsificationism"
        },
        {
          "doi": "10.2307/1990660",
          "note": "Doob (1949) Application of the theory of martingales — consistency theorem, Trans AMS 75:58"
        },
        {
          "url": "https://www.jstor.org/stable/2287514",
          "note": "de Finetti (1937) La Prévision — Dutch book argument and coherence, Ann Inst Poincaré"
        },
        {
          "doi": "10.2307/2291091",
          "note": "Kass & Raftery (1995) Bayes factors, J Am Stat Assoc 90:773"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/philosophy-of-science-statistics/b-induction-bayesian-convergence.yaml"
    },
    {
      "id": "b-philosophy-underdetermination-quantum",
      "title": "The quantum measurement problem and the philosophical underdetermination of theory by evidence share the same mathematical structure: in both cases, a superposition of possibilities collapses to a definite outcome only through an observer-dependent selection process whose physical basis is unspecified.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The underdetermination problem in philosophy of science (Quine-Duhem): any observation O is consistent with infinitely many theories T1, T2, ..., because any Ti can be protected by adjusting auxiliary hypotheses. The selection of a unique theory from the space of empirically equivalent theories requ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Quantum physicists and philosophers of science attend different conferences, publish in different journals, and use different formalisms. The quantum foundations community is aware of philosophical parallels but often dismisses them as \"mere interpretation\" without practical consequence. Philosophy of science rarely engages with the technical quantum no-go theorems (PBR, Bell, Kochen-Specker) as evidence in the underdetermination debate. The bridge requires both technical quantum formalism literacy from philosophers and engagement with formal epistemology from physicists.\n",
      "translation_table": [
        {
          "field_a_term": "Underdetermination of theory by evidence",
          "field_b_term": "Quantum superposition (multiple simultaneous descriptions)"
        },
        {
          "field_a_term": "Theory selection (non-evidential criteria)",
          "field_b_term": "Measurement/collapse (outcome selection)"
        },
        {
          "field_a_term": "Anti-realism (no fact beyond empirical adequacy)",
          "field_b_term": "Copenhagen interpretation (no value before measurement)"
        },
        {
          "field_a_term": "Scientific realism (theories true beyond phenomena)",
          "field_b_term": "Pilot wave / hidden variable interpretations (definite state always)"
        },
        {
          "field_a_term": "Many theories equally empirically adequate",
          "field_b_term": "Many-Worlds (all branches equally real)"
        },
        {
          "field_a_term": "Theoretical virtues (simplicity, parsimony)",
          "field_b_term": "Interpretive criteria (ontological parsimony in Many-Worlds; non-locality cost in Pilot Wave)"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nphys2309",
          "note": "Pusey, Barrett & Rudolph (2012) - On the reality of the quantum state; empirical constraint on epistemic interpretations"
        },
        {
          "doi": "10.1103/PhysicsPhysiqueFizika.1.195",
          "note": "Bell (1964) - On the Einstein-Podolsky-Rosen paradox; local hidden variable constraint"
        },
        {
          "url": "https://plato.stanford.edu/entries/scientific-underdetermination/",
          "note": "Stanford Encyclopedia of Philosophy: Underdetermination of Scientific Theory - formal analysis"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/philosophy-quantum/b-philosophy-underdetermination-quantum.yaml"
    },
    {
      "id": "b-phase-transitions-ml-grokking",
      "title": "Statistical physics phase transitions ↔ sudden generalization (grokking), double descent, and loss landscape geometry in deep learning",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Deep neural networks undergo a series of phenomena that are strikingly described by the language of statistical physics phase transitions:\n1. **Grokking (Power et al. 2022)**: a model trains to 100% training\n   accuracy but only *later* generalises to the test set — sometimes\n   thousands of epochs ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-grokking-criticality-universality"
      ],
      "communication_gap": "Phase transitions in machine learning were mathematically framed by the statistical physics community (Sompolinsky, Gardner, Amit, Mezard — JSTAT and PRL papers, 1980s-90s) but were essentially ignored by the ML engineering community until Kaplan et al. (2020) scaling laws made the empirical power laws impossible to ignore. The grokking phenomenon (Power et al. 2022, OpenAI) was reported by ML engineers without reference to phase transition language; the connection was made independently in several 2023-2026 papers. No textbook in deep learning connects grokking to the Ising model.\n",
      "translation_table": [
        {
          "field_a_term": "Temperature T in spin glass",
          "field_b_term": "Learning rate / noise level in SGD",
          "note": "High T = high noise = disordered phase; low T = sharp loss basin = ordered phase"
        },
        {
          "field_a_term": "Order parameter m (magnetisation)",
          "field_b_term": "Test accuracy / generalisation gap",
          "note": "Zero in disordered phase; jumps discontinuously at phase transition"
        },
        {
          "field_a_term": "Critical capacity alpha_c = 0.138 (Hopfield)",
          "field_b_term": "Interpolation threshold (N_params = N_data)",
          "note": "Both mark the boundary between memorisation and catastrophic failure/spin-glass phase"
        },
        {
          "field_a_term": "Replica symmetry breaking (RSB)",
          "field_b_term": "Multiple distinct loss basins (flat minima landscape)",
          "note": "RSB = exponentially many metastable states; deep learning = exponentially many flat minima"
        },
        {
          "field_a_term": "1/f noise at criticality",
          "field_b_term": "Neural scaling power laws L ~ N^{-alpha}",
          "note": "Power laws in both; grokking is near-critical; scaling laws suggest criticality throughout training"
        },
        {
          "field_a_term": "Renormalisation group (RG) fixed point",
          "field_b_term": "Representation convergence (deep networks distil same features regardless of architecture)",
          "note": "RG: irrelevant operators flow to zero; DNN: architecture-independent features emerge at depth"
        }
      ],
      "references": [
        {
          "doi": "10.48550/arXiv.2201.02177",
          "note": "Power et al. (2022) — Grokking: generalisation beyond overfitting. Seeded from arXiv nlin:AO harvest 2026-05-04: Dimensional Criticality at Grokking Across MLPs and Transformers"
        },
        {
          "doi": "10.48550/arXiv.2001.08361",
          "note": "Kaplan et al. (2020) — Neural scaling laws"
        },
        {
          "doi": "10.48550/arXiv.1412.0233",
          "note": "Choromanska et al. (2015) — loss surfaces of DNN and spin glasses"
        },
        {
          "doi": "10.1103/PhysRevLett.35.1792",
          "note": "Sherrington & Kirkpatrick (1975) — original SK spin-glass model"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/physics-ai/b-phase-transitions-ml-grokking.yaml"
    },
    {
      "id": "b-active-brownian-motion-x-cell-migration",
      "title": "Active Brownian Motion x Cell Migration - self-propelled particles in 2D\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Migrating cells (neutrophils, cancer cells) exhibit active Brownian motion: directional persistence at short timescales and diffusive behavior at long timescales, described by the active Ornstein-Uhlenbeck process; the persistence time and effective diffusivity are controlled by internal cytoskeleta",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Active matter physics (Vicsek model, active Brownian particles) and cell biology developed independently; the ABP model was systematically applied to single cell migration only after ~2010, despite single-cell tracking methods being available since the 1980s.\n",
      "translation_table": [
        {
          "field_a_term": "Cell crawling velocity (mu/h) with persistence",
          "field_b_term": "Self-propulsion velocity of active Brownian particle",
          "note": "A migrating cell maintains approximately constant speed with slowly reorienting direction, exactly matching the active Brownian particle model where self-propulsion speed v_0 is constant and orientation diffuses with rotational diffusivity D_r.\n"
        },
        {
          "field_a_term": "Persistence time (tau = 1/D_r, minutes to hours)",
          "field_b_term": "Rotational diffusion time of active Brownian particle",
          "note": "The persistence time tau over which the cell maintains its direction is 1/(2D_r) in 2D; it is set by the turnover time of the cytoskeletal polarity complex (Rac/Cdc42) and directly measurable from cell trajectory statistics.\n"
        },
        {
          "field_a_term": "Effective long-time diffusivity (D_eff = v_0^2 * tau / 2)",
          "field_b_term": "Long-time diffusion coefficient of active particle",
          "note": "At timescales >> tau, cell trajectories become diffusive with D_eff = v_0^2 * tau / (d-1) in d dimensions - the active Brownian particle result, relating macroscopic cell spreading to microscopic motility parameters.\n"
        },
        {
          "field_a_term": "Chemotaxis bias (gradient-directed migration)",
          "field_b_term": "Active particle with biased angular diffusion",
          "note": "Chemokine gradients bias the rotational diffusion of the cell's polarity axis toward the chemoattractant, equivalent to an active Brownian particle with a preferred orientation - adding drift to the diffusive regime.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.1118355109",
          "note": "Maiuri et al. (2015) - actin flow mediates a coupling between cell morphology and migration velocity; PNAS 112:1475"
        },
        {
          "doi": "10.1088/1367-2630/13/7/073036",
          "note": "Trepat et al. (2009) - forces behind collective cell migration; active Brownian motion description"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-biology/b-active-brownian-motion-x-cell-migration.yaml"
    },
    {
      "id": "b-active-matter-collective-locomotion",
      "title": "The Vicsek model's phase transition from disordered to ordered collective motion in self-propelled particles — driven by noise-dependent symmetry breaking despite Mermin-Wagner theorem prohibition — explains flocking in birds, bacterial swarming, and cytoskeletal dynamics, bridging non-equilibrium statistical mechanics with biological collective behaviour.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Active matter consists of self-propelled agents that continuously consume energy from internal fuel (ATP, chemical gradients, food) to generate directed motion. Examples span ten orders of magnitude: actin-myosin filaments (nm), bacteria (μm), cells (10μm), fish (cm), birds (m). The defining propert",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-active-nematic-defect-tissue-extrusion"
      ],
      "communication_gap": "The Vicsek model emerged from statistical physics (Physical Review Letters); collective animal behaviour emerged from ethology and ecology (Nature, Behavioral Ecology). For a decade after Vicsek (1995), the two literatures developed independently. Biologists studying bird flocking (Potts 1984, empirical) and physicists studying flocking models rarely cited each other. The connection was bridged partly by Couzin et al. (2002, 2005) who brought physics methods to biological collective motion data. The active nematics connection to cytoskeleton was developed primarily by the Cambridge group (Marchetti, Dogic) and required synthesising liquid crystal physics and cell biology — communities that almost never interact.\n",
      "translation_table": [
        {
          "field_a_term": "Vicsek velocity-alignment rule (average neighbours' directions)",
          "field_b_term": "social imitation in collective behaviour (conform to local norm)"
        },
        {
          "field_a_term": "noise amplitude η (misalignment per step)",
          "field_b_term": "individual variation / decision noise in biological system"
        },
        {
          "field_a_term": "critical noise η_c (phase transition)",
          "field_b_term": "critical density or perception range for collective behaviour onset"
        },
        {
          "field_a_term": "order parameter Φ = |⟨v⟩|/v₀ (mean polarisation)",
          "field_b_term": "degree of coherent flock or swarm alignment"
        },
        {
          "field_a_term": "active nematic ±½ topological defects",
          "field_b_term": "stress concentration points in tissue driving cell fate"
        },
        {
          "field_a_term": "actomyosin motor protein driving filament sliding",
          "field_b_term": "self-propulsion mechanism of active matter particle"
        },
        {
          "field_a_term": "non-equilibrium (self-propulsion, no Boltzmann measure)",
          "field_b_term": "metabolically active biological tissue (out-of-equilibrium)"
        },
        {
          "field_a_term": "spontaneous flow in active nematic",
          "field_b_term": "cytoplasmic streaming, tissue morphogenesis flows"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.75.1226",
          "note": "Vicsek et al. (1995) Phys Rev Lett 75:1226 — self-propelled particle model"
        },
        {
          "doi": "10.1103/PhysRevE.58.4828",
          "note": "Toner & Tu (1998) Phys Rev E 58:4828 — hydrodynamic theory of flocking"
        },
        {
          "doi": "10.1103/RevModPhys.85.1143",
          "note": "Marchetti et al. (2013) Rev Mod Phys 85:1143 — active matter review"
        },
        {
          "doi": "10.1038/s41467-018-05666-8",
          "note": "Doostmohammadi et al. (2018) Nat Commun 9:3246 — active nematics in biology"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-biology/b-active-matter-collective-locomotion.yaml"
    },
    {
      "id": "b-allosteric-regulation-x-conformational-dynamics",
      "title": "Allostery x Conformational Dynamics - protein communication as energy landscape shift\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Allosteric regulation (binding at one site changing activity at a distant site) occurs via population shift in the protein's conformational ensemble: the ligand reshapes the energy landscape, shifting Boltzmann weights between pre-existing conformational states rather than inducing new conformations",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "The induced-fit model (Koshland 1958) and conformational selection model (Monod 1965) were debated for 40 years as competing mechanisms; modern NMR relaxation dispersion and energy landscape theory (Boehr, Nussinov, Wright 2009) showed they are limiting cases of the same population shift framework, but many biochemistry textbooks still present them as distinct mechanisms.\n",
      "translation_table": [
        {
          "field_a_term": "Allosteric ligand (effector molecule)",
          "field_b_term": "External field shifting the Boltzmann distribution",
          "note": "The ligand acts as an external field in the protein's energy landscape; it differentially stabilizes or destabilizes conformational substates, shifting their Boltzmann populations without creating fundamentally new states.\n"
        },
        {
          "field_a_term": "Active vs inactive protein conformations",
          "field_b_term": "Two-state energy minimum (bistable potential)",
          "note": "The protein's conformational energy landscape has at least two energy minima (active, inactive); the allosteric signal changes the relative free energies of these minima, adjusting the population ratio according to the Boltzmann factor.\n"
        },
        {
          "field_a_term": "Conformational entropy change upon allosteric binding",
          "field_b_term": "Entropy of conformational ensemble (Shannon entropy of p_i)",
          "note": "Allostery can operate through changes in conformational entropy (not just enthalpy); a ligand that narrows the conformational ensemble (entropy loss) at one site communicates to another site via correlated entropy changes - allosteric entropy.\n"
        },
        {
          "field_a_term": "Allosteric pathway (correlated residue motions)",
          "field_b_term": "Correlation function in the energy landscape",
          "note": "The allosteric pathway (which residues transmit the signal) corresponds to the chain of correlated fluctuations in the energy landscape; information theory (mutual information between residue positions) identifies these paths.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1016/j.sbi.2010.01.002",
          "note": "Boehr, Nussinov & Wright (2009) - role of dynamic conformational ensembles in allostery; Nature Chem Biol 5:789"
        },
        {
          "doi": "10.1073/pnas.1216180110",
          "note": "Tzeng & Kalodimos (2013) - allosteric inhibition through entropy regulation; Nature 488:236"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-biology/b-allosteric-regulation-x-conformational-dynamics.yaml"
    },
    {
      "id": "b-bioacoustics-sound-production",
      "title": "Animal sound production and hearing are direct applications of acoustic physics — the Helmholtz resonator equation governs birdsong and vocal tract resonance, bat echolocation achieves near-physical-limit range resolution, and barn owl sound localization exploits interaural time differences with microsecond precision.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Sound production in animals implements physical acoustic principles. Crickets stridulate by scraping a plectrum across file teeth — the resonant frequency is determined by file tooth spacing and wing membrane compliance, acting as a driven resonator. Frogs use vocal sac resonance to amplify calls; w",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-bat-echolocation-neural-matched-filter-implementation"
      ],
      "communication_gap": "Bioacoustics spans physics (acoustics, signal processing), biology (animal behavior, neuroscience), and engineering (sonar design). Acoustical Society of America publishes across all three areas but researchers from each discipline primarily attend their own conferences. Bat echolocation researchers (Animal Behaviour, J Exp Biol) and radar/sonar engineers (IEEE) overlap far less than the physics would suggest.\n",
      "translation_table": [
        {
          "field_a_term": "Helmholtz resonator f₀ = (c/2pi) sqrt(A/VL)",
          "field_b_term": "vocal tract formant frequencies — F1, F2 shaped by tongue and lip position",
          "note": "Human vowel production is Helmholtz resonator theory applied to a time-varying cavity"
        },
        {
          "field_a_term": "radar range resolution delta_R = c / (2B)",
          "field_b_term": "bat echolocation range resolution delta_R ≈ 2-3 mm",
          "note": "Bats achieve near-physical-limit range discrimination through wide-bandwidth FM sweeps"
        },
        {
          "field_a_term": "Doppler shift f_echo = f_emit * (c + v_target) / (c - v_bat)",
          "field_b_term": "horseshoe bat Doppler compensation — adjusts call frequency to null own velocity",
          "note": "CF bats maintain echo frequency in acoustic fovea; measured to 0.1 Hz precision"
        },
        {
          "field_a_term": "interaural time difference (ITD) — binaural cue",
          "field_b_term": "barn owl 1-2 microsecond ITD resolution — near spike-timing limit",
          "note": "Jeffress model delay-line coincidence detector implemented in nucleus laminaris"
        },
        {
          "field_a_term": "SOFAR channel (ocean acoustic waveguide)",
          "field_b_term": "whale long-range communication — basin-scale song propagation",
          "note": "Sound velocity minimum at ~1000m depth creates a waveguide confining low-frequency sound"
        }
      ],
      "references": [
        {
          "note": "Greenewalt (1968) Bird Song: Acoustics and Physiology. Smithsonian Institution Press"
        },
        {
          "doi": "10.1126/science.441058",
          "note": "Simmons (1979) Science 204:1336 — bat echolocation target ranging"
        },
        {
          "doi": "10.1126/science.644324",
          "note": "Knudsen & Konishi (1978) Science 200:795 — barn owl sound localization"
        },
        {
          "note": "Au (1993) The Sonar of Dolphins. Springer"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-biology/b-bioacoustics-sound-production.yaml"
    },
    {
      "id": "b-bioenergetics-proton-motive-force",
      "title": "Mitchell's chemiosmotic hypothesis — proton electrochemical gradient (PMF ≈ 200 mV) across the inner mitochondrial membrane drives Boyer's rotary ATP synthase F₀F₁ molecular motor, unifying thermodynamic free-energy transduction with nanoscale mechanical rotation in the universal energy currency of all life.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Mitchell (1961) proposed that the free energy of electron transport is stored not as a chemical intermediate but as a proton electrochemical gradient across the inner mitochondrial membrane: Δμ_H⁺ = FΔψ − 2.3RT·ΔpH (proton motive force, PMF ≈ 200 mV). The respiratory chain complexes act as proton pu",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-pmf-bacterial-flagella-atp-synthase-evolutionary-homology"
      ],
      "communication_gap": "Mitchell's 1961 proposal was initially rejected by biochemists who expected a chemical intermediate (the \"squiggle\" ~P). For 15 years, the chemiosmotic hypothesis was a minority view, debated primarily in biochemistry journals (Biochem J, BBA) with little engagement from physicists. The biophysical formalism (electrophysiology, membrane potential measurement) required electrode techniques from physiology. Boyer's rotary mechanism was doubted until Walker's structural confirmation and Noji's 1997 direct visualisation of γ-subunit rotation — each in a different sub-community (solution biochemistry, X-ray crystallography, single-molecule fluorescence). The field still has a terminology gap: physicists use \"electrochemical potential\" and \"torque\"; biochemists use \"proton motive force\" and \"conformational change\" for the same concept.\n",
      "translation_table": [
        {
          "field_a_term": "proton motive force Δμ_H⁺ = FΔψ − 2.3RT·ΔpH",
          "field_b_term": "Gibbs free energy stored in the transmembrane electrochemical gradient",
          "note": "the physics potential energy difference directly drives biological work"
        },
        {
          "field_a_term": "rotary electric motor (F₀ c-ring rotation by H⁺ flux)",
          "field_b_term": "Boyer's F₀F₁ ATP synthase molecular motor",
          "note": "first biological nanomachine directly visualised rotating by Noji et al. (1997)"
        },
        {
          "field_a_term": "Carnot efficiency limit for isothermal free-energy transducer",
          "field_b_term": "near-unity efficiency of ATP synthase under physiological PMF"
        },
        {
          "field_a_term": "electromagnetic torque in electric motor",
          "field_b_term": "PMF-driven torque on F₀ c-ring driving γ-subunit rotation"
        },
        {
          "field_a_term": "mechanical work output per cycle",
          "field_b_term": "ATP molecules synthesised per full rotation (3 ATP / 360°)"
        }
      ],
      "references": [
        {
          "doi": "10.1038/191144a0",
          "note": "Mitchell (1961) Nature 191:144 — original chemiosmotic hypothesis"
        },
        {
          "note": "Boyer (1997) Nobel Lecture — binding change mechanism and rotary catalysis"
        },
        {
          "doi": "10.1002/anie.199723081",
          "note": "Walker (1998) Angew Chem Int Ed 37:2308 — crystal structure of F₁-ATPase"
        },
        {
          "doi": "10.1038/sj.emboj.7600975",
          "note": "Dimroth et al. (2006) EMBO J 25:1 — Na⁺-translocating ATP synthase stoichiometry"
        },
        {
          "note": "Harold (1986) The Vital Force: A Study of Bioenergetics — W. H. Freeman"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-biology/b-bioenergetics-proton-motive-force.yaml"
    },
    {
      "id": "b-brownian-motion-cell-diffusion",
      "title": "Einstein's 1905 Brownian motion theory and the Stokes-Einstein relation govern macromolecular diffusion in living cells, where anomalous subdiffusion arising from cytoplasmic crowding reveals a glass-transition-like phenomenon in the intracellular environment.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Einstein (1905) derived the mean-squared displacement ⟨x²⟩ = 2Dt for a Brownian particle, with diffusion coefficient D = kT/(6πηr) (Stokes-Einstein relation). This result directly governs the kinetics of biochemical reactions inside cells: diffusion-limited reaction rates k_diff = 4πDr depend on D, ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-cytoplasm-glass-transition"
      ],
      "communication_gap": "Statistical physicists who study glass transitions rarely read cell biology journals, and cell biologists who measure diffusion by FRAP or FCS rarely connect their η estimates to glass-transition physics. The anomalous diffusion literature is split between physics journals (Phys Rev Lett) and biophysics journals (Biophys J) with little cross-citation.\n",
      "translation_table": [
        {
          "field_a_term": "mean-squared displacement ⟨x²⟩ = 2Dt",
          "field_b_term": "single-molecule trajectory MSD measured by fluorescence tracking",
          "note": "directly measurable in vivo; deviation from linearity signals anomalous diffusion"
        },
        {
          "field_a_term": "diffusion coefficient D = kT/(6πηr)",
          "field_b_term": "in vivo diffusivity of signaling molecules and transcription factors",
          "note": "governs whether reactions are diffusion-limited or reaction-limited"
        },
        {
          "field_a_term": "viscosity η (bulk solvent)",
          "field_b_term": "effective cytoplasmic viscosity (macromolecular crowding parameter)",
          "note": "3–100× higher than water; probe-size and time-scale dependent"
        },
        {
          "field_a_term": "anomalous diffusion exponent α < 1 (subdiffusion)",
          "field_b_term": "cytoplasmic crowding and caging by organelles and cytoskeleton",
          "note": "Golding & Cox (2006) observed α ≈ 0.75 in E. coli cytoplasm"
        },
        {
          "field_a_term": "glass transition temperature T_g",
          "field_b_term": "transition between liquid-like and glass-like cytoplasmic states",
          "note": "cells may regulate this transition to control reaction kinetics"
        },
        {
          "field_a_term": "diffusion-limited rate k_diff = 4πDr",
          "field_b_term": "maximum possible enzymatic reaction rate set by encounter frequency"
        }
      ],
      "references": [
        {
          "doi": "10.1002/andp.19053220806",
          "note": "Einstein (1905) — On the motion of small particles suspended in a stationary liquid (Brownian motion)"
        },
        {
          "note": "Stokes (1851) — On the effect of the internal friction of fluids on the motion of pendulums; Trans Camb Phil Soc"
        },
        {
          "doi": "10.1146/annurev.biophys.26.1.373",
          "note": "Saxton & Jacobson (1997) — Single-particle tracking; applications to membrane dynamics; Annu Rev Biophys 26:373"
        },
        {
          "doi": "10.1103/PhysRevLett.96.098102",
          "note": "Golding & Cox (2006) — Physical nature of bacterial cytoplasm; anomalous subdiffusion in E. coli; Phys Rev Lett 96:098102"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-biology/b-brownian-motion-cell-diffusion.yaml"
    },
    {
      "id": "b-brownian-motion-molecular-motors",
      "title": "Einstein's Brownian motion formalism (1905) sets the thermal noise floor that molecular motors (kinesin, dynein, myosin V) must overcome to perform directed mechanical work, connecting statistical physics of diffusion to the mechanochemistry of the cytoskeleton.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Einstein's 1905 derivation of Brownian motion gives ⟨x²⟩ = 2Dt with diffusion coefficient D = k_BT/(6πηr) (Stokes-Einstein relation), quantifying thermal noise as a function of temperature, viscosity, and particle size. This is the same thermal bath in which molecular motors operate. Kinesin walks p",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-molecular-motor-efficiency-fluctuation-theorem"
      ],
      "communication_gap": "Cell biologists typically learn Michaelis-Menten kinetics and do not engage with non-equilibrium statistical mechanics; statistical physicists do not routinely read Cell or Nature Cell Biology. The mathematical language of fluctuation theorems (Crooks, Jarzynski) is not taught in biophysics courses, and the experimental techniques (optical tweezers) are physically specialized enough to create an instrumentation barrier between the communities.\n",
      "translation_table": [
        {
          "field_a_term": "mean squared displacement ⟨x²⟩ = 2Dt",
          "field_b_term": "root-mean-square stepping deviation of motor position",
          "note": "Thermal noise sets the positional uncertainty that motor stepping must overcome"
        },
        {
          "field_a_term": "Stokes drag coefficient 6πηr",
          "field_b_term": "viscous load on motor cargo",
          "note": "Determines how fast a motor must step to overcome diffusion"
        },
        {
          "field_a_term": "Boltzmann factor exp(-ΔG/k_BT)",
          "field_b_term": "ATP hydrolysis rate vs. reverse transition probability",
          "note": "Detailed balance / Crooks fluctuation theorem relate forward and reverse motor rates"
        },
        {
          "field_a_term": "Jarzynski equality ⟨exp(-W/k_BT)⟩ = exp(-ΔF/k_BT)",
          "field_b_term": "free-energy measurement from optical-tweezer pulling experiments",
          "note": "Allows ΔG of protein folding / motor cycle to be measured from irreversible pulls"
        },
        {
          "field_a_term": "pN force at nanometre scale",
          "field_b_term": "kinesin stall force ~6 pN, myosin V ~2 pN",
          "note": "Optical tweezers directly measure these forces confirming Einstein-scale physics"
        }
      ],
      "references": [
        {
          "doi": "10.1002/andp.19053220806",
          "note": "Einstein (1905) — Brownian motion, Ann. Phys. 17:549; derivation of ⟨x²⟩=2Dt"
        },
        {
          "note": "Howard (2001) Mechanics of Motor Proteins and the Cytoskeleton, Sinauer"
        },
        {
          "doi": "10.1038/348348a0",
          "note": "Block et al. (1990) — Bead movement by single kinesin molecules studied with optical tweezers, Nature 348:348"
        },
        {
          "doi": "10.1103/PhysRevLett.78.2690",
          "note": "Jarzynski (1997) — Nonequilibrium equality for free energy differences, Phys Rev Lett 78:2690"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-biology/b-brownian-motion-molecular-motors.yaml"
    },
    {
      "id": "b-cell-division-spindle-assembly",
      "title": "Biophysics of Cell Division and Spindle Assembly — microtubule dynamic instability, motor force balance, and the spindle assembly checkpoint ensure faithful chromosome segregation",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The mitotic spindle is a transient bipolar structure of microtubules (MTs) that must capture, align, and segregate chromosomes with near-perfect fidelity in every cell division. Dynamic instability (Mitchison & Kirschner 1984): individual MTs stochastically switch between polymerisation (GTP-tubulin",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "The physics of MT dynamics (stochastic modelling, force generation, viscoelastic spindle mechanics) is developed by biophysicists while the molecular mechanisms of the SAC and Aurora B kinase are studied by cell biologists. These communities publish in different journals (Biophys J vs J Cell Biol) and use different experimental tools (laser ablation, laser trapping vs. biochemical reconstitution, fluorescence imaging). Quantitative physical models of spindle function that incorporate molecular detail remain an active frontier requiring genuine cross- disciplinary collaboration.\n",
      "translation_table": [
        {
          "field_a_term": "GTP cap (terminal GTP-tubulin subunits)",
          "field_b_term": "kinetic safety cap preventing catastrophe during MT polymerisation",
          "note": "GTP hydrolysis (slower than addition) creates a lag; if addition stops, hydrolysis catches up and catastrophe ensues"
        },
        {
          "field_a_term": "dynamic instability (stochastic switching)",
          "field_b_term": "search-and-capture mechanism for kinetochore attachment",
          "note": "Dynamic instability creates an exploratory polymer; biased rescue/catastrophe rates near kinetochores focus MT ends on chromosomes"
        },
        {
          "field_a_term": "spindle assembly checkpoint (SAC/MCC)",
          "field_b_term": "wait-anaphase signal from unattached kinetochore",
          "note": "~200 Mad2 molecules per kinetochore; catalytic amplification releases ~1000 MCC molecules per minute from one unattached kinetochore"
        },
        {
          "field_a_term": "Aurora B tension-sensing and error correction",
          "field_b_term": "proof-reading mechanism releasing improperly attached kinetochore–MT interactions",
          "note": "Spatial separation: Aurora B (centromere) and its substrates (outer kinetochore) are pulled apart by correct attachment tension → reduced phosphorylation → stable attachment"
        },
        {
          "field_a_term": "force balance at metaphase plate",
          "field_b_term": "mechanical equilibrium determining chromosome congression geometry",
          "note": "Kinesin-5 outward push balanced by kinetochore tension and chromokinesin polar ejection forces; plate position determined by motor balance"
        },
        {
          "field_a_term": "MT poleward flux (kinetochore MT depolymerisation at poles)",
          "field_b_term": "continuous force generation via MT treadmilling in the spindle",
          "note": "Flux rate ~2 μm/min; contributes ~50% of poleward kinetochore force in animal cells; speckle microscopy measurement"
        }
      ],
      "references": [
        {
          "doi": "10.1038/312237a0",
          "note": "Mitchison & Kirschner (1984) Nature 312:237 — dynamic instability of microtubules"
        },
        {
          "doi": "10.1038/nrm2101",
          "note": "Musacchio & Salmon (2007) Nat Rev Mol Cell Biol 8:379 — spindle assembly checkpoint"
        },
        {
          "doi": "10.1038/nrm2808",
          "note": "Walczak et al. (2010) Nat Rev Mol Cell Biol 11:91 — mechanisms of chromosome segregation"
        },
        {
          "note": "Lodish et al. (2021) Molecular Cell Biology — cell division chapter"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-biology/b-cell-division-spindle-assembly.yaml"
    },
    {
      "id": "b-cochlear-mechanics-hearing",
      "title": "The mammalian cochlea is a hydromechanical frequency analyzer governed by Navier-Stokes fluid dynamics and outer hair cell electromotility implementing a biological active feedback amplifier near a Hopf bifurcation, providing 40-60 dB of gain with remarkable frequency selectivity through a piezoelectric-like molecular mechanism, bridging fluid mechanics, biophysics, and nonlinear dynamics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The mammalian cochlea is a hydromechanical frequency analyzer — a tapered fluid- filled tube where each position resonates to a specific frequency (place theory, von Békésy 1961 Nobel). Basilar membrane vibration amplitude follows a traveling wave governed by Navier-Stokes equations for viscous flow",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-cochlear-active-amplification-hopf-bifurcation"
      ],
      "communication_gap": "Von Békésy's cochlear mechanics is taught in biophysics and audiology but not in fluid mechanics or nonlinear dynamics courses. The Hopf bifurcation connection was made by theoretical physicists (Eguiluz, Ospeck, Choe, Hudspeth, Magnasco 2000) and has not yet been fully integrated into the audiology literature.\n",
      "translation_table": [
        {
          "field_a_term": "basilar membrane traveling wave",
          "field_b_term": "Navier-Stokes solution for viscous flow in tapered channel",
          "note": "BM mechanics is exactly the hydrodynamics problem of an elastic plate in viscous fluid"
        },
        {
          "field_a_term": "outer hair cell electromotility (prestin motor)",
          "field_b_term": "piezoelectric transducer (electrical → mechanical displacement)",
          "note": "prestin is the only known biological piezoelectric; basis of cochlear amplifier"
        },
        {
          "field_a_term": "spontaneous otoacoustic emissions (SOAEs)",
          "field_b_term": "limit cycle oscillations near a Hopf bifurcation",
          "note": "SOAEs are faint tones emitted by the ear; arise when active amplification overcorrects"
        },
        {
          "field_a_term": "cochlear tuning (Q_ERB ~ 8-10)",
          "field_b_term": "quality factor of resonant oscillator: Q = f_0/Δf",
          "note": "active amplification raises Q far above passive (~1); Hopf mechanism provides frequency sharpening"
        },
        {
          "field_a_term": "basilar membrane compression (1 dB output per 3 dB input)",
          "field_b_term": "(f - f_c)^{1/3} critical oscillator power law",
          "note": "cochlear compression = signature of operation near Hopf bifurcation"
        }
      ],
      "references": [
        {
          "note": "von Békésy (1960) Experiments in Hearing; McGraw-Hill (Nobel Prize 1961)"
        },
        {
          "doi": "10.1038/35070638",
          "note": "Hudspeth (2001) Nature 414:170 — Hopf bifurcation and the cochlear amplifier"
        },
        {
          "doi": "10.1152/physrev.00040.2007",
          "note": "Ashmore (2008) Physiol Rev 88:173 — OHC electromotility and prestin"
        },
        {
          "note": "Keener & Sneyd (2009) Mathematical Physiology; Springer"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-biology/b-cochlear-mechanics-hearing.yaml"
    },
    {
      "id": "b-diffusion-limited-aggregation-x-fractal-growth",
      "title": "Diffusion-limited aggregation x Fractal biological growth — DLA as dendritic morphogenesis\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Diffusion-limited aggregation (DLA) generates fractal cluster morphologies with fractal dimension D approximately 1.71 in 2D; branching patterns in snowflakes, lightning, coral, and lung bronchial trees all exhibit the same fractal dimension, indicating that DLA is the universal growth law for syste",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Physicists studying DLA since the Witten-Sander (1981) paper and biologists studying branching morphogenesis both produce fractal structures but rarely cite each other; the connection was made explicit in the 1990s but quantitative comparison of fractal dimensions between physical DLA and biological systems is not systematic.\n",
      "translation_table": [
        {
          "field_a_term": "diffusion field around DLA cluster (physics)",
          "field_b_term": "morphogen/nutrient gradient around growing biological structure (biology)",
          "note": "Branching morphogenesis is driven by diffusion of signaling molecules or nutrients toward growth tips — the same physics as DLA"
        },
        {
          "field_a_term": "DLA fractal dimension D = 1.71 in 2D (physics)",
          "field_b_term": "fractal dimension of biological branching networks (biology)",
          "note": "Lung airways, retinal vasculature, and neuronal dendrites exhibit fractal dimensions close to DLA predictions"
        },
        {
          "field_a_term": "tip instability in DLA (physics)",
          "field_b_term": "lateral inhibition in branching morphogenesis (biology)",
          "note": "DLA tips grow preferentially due to enhanced diffusion field; biological tips are stabilized by lateral inhibition signals (BMP, FGF) creating the same instability"
        },
        {
          "field_a_term": "DLA cluster growth rate proportional to grad(phi) (physics)",
          "field_b_term": "chemotaxis velocity proportional to morphogen gradient (biology)",
          "note": "Cells grow toward higher morphogen concentration just as DLA clusters grow proportional to the local diffusion flux"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.47.1400",
          "note": "Witten & Sander (1981) - Diffusion-limited aggregation, a kinetic critical phenomenon; Phys Rev Lett 47:1400"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-biology/b-diffusion-limited-aggregation-x-fractal-growth.yaml"
    },
    {
      "id": "b-electrophysiology-action-potential",
      "title": "The Hodgkin-Huxley equations translate membrane biophysics into a nonlinear dynamical system identical in structure to van der Pol oscillators, and the cable equation governing AP propagation is the same parabolic PDE that describes heat conduction and diffusion — myelination as topology-optimised insulation achieving 100× velocity gain.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Hodgkin-Huxley (HH) model describes the action potential using a membrane circuit: C_m dV/dt = -g_Na m³h(V-E_Na) - g_K n⁴(V-E_K) - g_L(V-E_L) + I_ext. Each conductance variable (m, h, n) obeys a first-order kinetic equation dx/dt = α_x(V)(1-x) - β_x(V)x with voltage-dependent rates measured by H",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-myelination-optimal-axon-diameter-conduction-velocity"
      ],
      "communication_gap": "Electrophysiology is taught primarily in neuroscience and biophysics programmes; the mathematical equivalence to cable theory and nonlinear dynamical systems is rarely emphasised to biologists, who learn the HH model phenomenologically. Electrical engineers who know cable theory and transmission lines rarely connect their expertise to axonal conduction. The myelination optimality argument (minimise wiring cost for given velocity) is known in theoretical neuroscience but seldom taught to neurologists or MS researchers.\n",
      "translation_table": [
        {
          "field_a_term": "cable equation (∂V/∂t = (d/4R_i)∂²V/∂x²)",
          "field_b_term": "axon AP propagation (spatial spread of depolarisation)",
          "note": "Identical PDE to heat conduction; Lord Kelvin derived it for the transatlantic telegraph cable"
        },
        {
          "field_a_term": "space constant λ = √(r_m/r_i) (passive cable)",
          "field_b_term": "electrotonic length of dendrites and axon initial segment",
          "note": "Determines how far a subthreshold signal decays exponentially before becoming negligible"
        },
        {
          "field_a_term": "nonlinear oscillator limit cycle",
          "field_b_term": "action potential waveform (depolarisation → repolarisation → hyperpolarisation)",
          "note": "HH equations are topologically equivalent to van der Pol oscillator in reduced 2D models (FitzHugh-Nagumo)"
        },
        {
          "field_a_term": "conduction velocity θ ∝ d (myelinated)",
          "field_b_term": "saltatory conduction — current jumps between nodes of Ranvier",
          "note": "Myelin wraps increase λ dramatically; AP regenerates only at unmyelinated nodes"
        },
        {
          "field_a_term": "voltage clamp (command voltage, measure current)",
          "field_b_term": "ionic channel characterisation (separate Na⁺, K⁺ conductance kinetics)",
          "note": "Hodgkin-Huxley used Cole's voltage clamp to isolate individual conductance time courses"
        }
      ],
      "references": [
        {
          "doi": "10.1113/jphysiol.1952.sp004764",
          "note": "Hodgkin & Huxley (1952) J Physiol 117:500 — the complete HH model of the action potential"
        },
        {
          "doi": "10.1098/rspb.1946.0024",
          "note": "Hodgkin & Rushton (1946) Proc R Soc B 133:444 — electrical constants of a crustacean nerve fibre; cable theory"
        },
        {
          "note": "Koch, C. (1999) Biophysics of Computation. Oxford University Press — comprehensive treatment of cable theory and compartmental models"
        },
        {
          "doi": "10.1038/nrn2781",
          "note": "Nave (2010) Nat Rev Neurosci 11:275 — myelination and axon integrity; oligodendrocyte biology"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-biology/b-electrophysiology-action-potential.yaml"
    },
    {
      "id": "b-entropy-production-x-living-systems",
      "title": "Entropy production ↔ Living systems — life as dissipative structure",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Living organisms are dissipative structures (Prigogine) that maintain low internal entropy by exporting entropy to the environment; the minimum entropy production theorem and maximum entropy production principle both apply to biological homeostasis, connecting non-equilibrium thermodynamics to metab",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-entropy-production-x-living-systems"
      ],
      "communication_gap": "Prigogine's Nobel work (1977) was developed in physical chemistry and largely ignored by mainstream biology for decades; evolutionary biology and ecology developed metabolic scaling laws (West-Brown-Enquist 1997) independently without reference to the dissipative structure framework, leading to parallel literatures that have only recently begun to converge.",
      "translation_table": [
        {
          "field_a_term": "entropy production rate σ (non-equilibrium thermodynamics)",
          "field_b_term": "metabolic heat dissipation in living cells (biology)",
          "note": "Cells maintain low internal entropy by exporting σ via heat and waste products"
        },
        {
          "field_a_term": "dissipative structure (self-organized, far-from-equilibrium)",
          "field_b_term": "living organism maintaining homeostasis against equilibrium",
          "note": "Prigogine's 1977 Nobel work: life is thermodynamically sustained by throughput"
        },
        {
          "field_a_term": "minimum entropy production principle (near-equilibrium steady state)",
          "field_b_term": "metabolic rate set-point regulation in basal metabolism",
          "note": "Onsager reciprocal relations predict minimum entropy production at homeostasis"
        },
        {
          "field_a_term": "maximum entropy production principle (far-from-equilibrium selection)",
          "field_b_term": "evolutionary selection for higher metabolic throughput",
          "note": "MEP principle may explain why complex life dissipates more energy per unit mass"
        }
      ],
      "references": [
        {
          "doi": "10.1007/s12064-010-0097-5",
          "note": "Unrean & Srienc (2011) — entropy production and dissipative structures in biology"
        },
        {
          "doi": "10.1103/PhysRevLett.75.1226",
          "note": "Vicsek et al. (1995) — related non-equilibrium order in biology"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-biology/b-entropy-production-x-living-systems.yaml"
    },
    {
      "id": "b-flagellar-motor-rotary-machines",
      "title": "The bacterial flagellar motor is a biological rotary machine powered by proton motive force ΓÇö identical in energy source to ATP synthase ΓÇö that generates 1270 pN┬╖nm stall torque, rotates at 1700 Hz, and implements perfect chemotactic adaptation via CheY-P switching of CCW/CW rotation.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The bacterial flagellar motor (BFM) is a rotary molecular machine that directly converts electrochemical energy (proton motive force, PMF = ╬ö╬¿ + ╬öpH) into mechanical rotation ΓÇö the same energy source as ATP synthase (Bridge 352) but used for locomotion rather than ATP synthesis. Structure: the ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-flagellar-motor-stator-assembly-pmf-dependent-mechanosensing"
      ],
      "communication_gap": "Physicists who study the BFM as a mechanochemical engine (torque-speed, efficiency, noise) rarely engage with the systems biology literature on chemotaxis network design (integral feedback, robustness). Cell biologists who study chemotaxis signaling rarely engage with the biophysics of motor mechanics. The BFM is a rare case where molecular biology, biophysics, and systems biology all converge.\n",
      "translation_table": [
        {
          "field_a_term": "proton motive force (PMF = ╬ö╬¿ + (RT/F)╬öpH)",
          "field_b_term": "electrochemical free energy driving rotor; thermodynamic \"fuel\"",
          "note": "same energy currency as ATP synthase; efficiency approaches thermodynamic limit at low speed"
        },
        {
          "field_a_term": "stall torque T Γëê 1270 pN┬╖nm",
          "field_b_term": "maximum mechanical work per revolution (W = T┬╖2╧Ç Γëê 8000 pN┬╖nm)",
          "note": "pN┬╖nm scale is typical of molecular machines; compare to myosin (few pN┬╖nm)"
        },
        {
          "field_a_term": "CheY-P (phosphorylated response regulator)",
          "field_b_term": "allosteric switch signal binding the C-ring (FliM)",
          "note": "chemical signal ΓåÆ mechanical switching (CCW/CW) ΓÇö the molecular logic gate of chemotaxis"
        },
        {
          "field_a_term": "MCP methylation / demethylation (CheR/CheB)",
          "field_b_term": "integral feedback control ΓÇö steady-state output independent of input level",
          "note": "perfect adaptation is a mathematical property of integral feedback (Alon 2007)"
        },
        {
          "field_a_term": "bead assay (tethered rotation)",
          "field_b_term": "single-molecule biophysics ΓÇö directly measures torque-speed curve",
          "note": "optically trapped bead allows viscous load variation; maps full mechanical output"
        }
      ],
      "references": [
        {
          "doi": "10.1146/annurev.biochem.72.091801.161737",
          "note": "Berg (2003) The rotary motor of bacterial flagella; Annu Rev Biochem 72:19"
        },
        {
          "doi": "10.1016/S0014-5793(03)00310-7",
          "note": "Blair (2003) Flagellar movement driven by proton translocation; FEBS Lett 545:86"
        },
        {
          "doi": "10.1073/pnas.0601874103",
          "note": "Xing et al. (2006) Torque-speed relationship of the bacterial flagellar motor; PNAS 103:1260"
        },
        {
          "note": "Alon (2007) An Introduction to Systems Biology; Chapman & Hall"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-biology/b-flagellar-motor-rotary-machines.yaml"
    },
    {
      "id": "b-flagellar-motor-rotary-mechanics",
      "title": "The bacterial flagellar motor is a nanoscale rotary machine applying the same electrochemical-to-mechanical transduction principles as macroscopic electric motors: the proton motive force (PMF = Δψ + 2.3RT/F × ΔpH) drives torque generation at ~1000 pN·nm via stator-rotor ion channel mechanics, rotating at up to 1700 rpm.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The bacterial flagellar motor (BFM) converts the proton motive force (PMF) — the electrochemical gradient across the inner membrane — into mechanical rotation. PMF = Δψ - (RT/F)ΔpH where Δψ is the membrane potential (~-100 to -170 mV) and ΔpH is the pH gradient. The stator complex (MotA/MotB, or Pom",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Physicists studying molecular motors (Evans, Bustamante, Bustamante) and microbiologists studying chemotaxis (Adler, Berg, Brown) developed largely separate literatures. Berg & Anderson (1973) proposed the rotary motor mechanism simultaneously with Howard Berg's own chemotaxis work; the connection to electromechanical principles was made explicit by Berry (2003) but remains underemphasized in microbiology courses. Motor protein biophysics textbooks rarely compare BFMs to macroscopic electric motors.\n",
      "translation_table": [
        {
          "field_a_term": "proton motive force PMF = Δψ + 2.3RT/F × ΔpH (kJ/mol)",
          "field_b_term": "voltage across electric motor armature × charge carrier flux",
          "note": "PMF is directly analogous to EMF in Faraday's law of induction; units differ but physics identical"
        },
        {
          "field_a_term": "stator ion channel (MotA/MotB) proton translocation",
          "field_b_term": "electromagnetic stator winding driving rotor torque",
          "note": "ion current through the stator is the biological analog of current in a motor coil"
        },
        {
          "field_a_term": "C-ring rotor (FliM/FliN/FliG) — 26-34 FliM subunits",
          "field_b_term": "permanent magnet rotor in synchronous motor",
          "note": "the discrete subunit positions create quantal torque steps of ~1 pN·nm per subunit"
        },
        {
          "field_a_term": "stator unit number (1-11 MotA/MotB complexes, adaptive)",
          "field_b_term": "number of active motor coil pairs (power scaling)",
          "note": "BFM torque scales linearly with stator number — biological gear shifting"
        },
        {
          "field_a_term": "switching rate (CheY-P controlled CW↔CCW flip)",
          "field_b_term": "direction reversal in a DC motor via commutator",
          "note": "CheY-P allosterically binds FliM to shift C-ring conformation, reversing rotation direction"
        }
      ],
      "references": [
        {
          "doi": "10.1146/annurev.biochem.72.121801.161737",
          "note": "Berg (2003) — The rotary motor of bacterial flagella; Annu Rev Biochem 72:19"
        },
        {
          "doi": "10.1017/S0033583507004507",
          "note": "Sowa & Berry (2008) — Bacterial flagellar motor; Q Rev Biophys 41:103"
        },
        {
          "doi": "10.1126/science.aab4070",
          "note": "Xue et al. (2015) — Visualizing the c-ring of the flagellar motor; Science 350:491"
        },
        {
          "doi": "10.1038/386299a0",
          "note": "Noji et al. (1997) — Direct observation of the rotation of F₁-ATPase; Nature 386:299"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-biology/b-flagellar-motor-rotary-mechanics.yaml"
    },
    {
      "id": "b-flagellar-motor-x-rotary-engine",
      "title": "Bacterial flagellar motor x Rotary engine - proton gradient as mechanical torque\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The bacterial flagellar motor converts the transmembrane proton-motive force (delta mu_H+ = -RTln([H+]_in/[H+]_out) - F*delta_psi) into rotational torque at 100-300 Hz with near 100% thermodynamic efficiency near stall; it is a biological implementation of an electrochemical rotary engine where each",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Microbiologists studying flagellar motility and engineers designing rotary motors developed parallel frameworks for torque generation; Berg and colleagues (1973, 2003) established the flagellar motor as a proton-driven rotary engine, and thermodynamic analysis (Meister & Berg 1987) derived near-Carnot efficiency near stall — but the design principles (stepwise proton coupling, near-Carnot operation, torque-speed linearity) have not been systematically imported into synthetic molecular motor design.\n",
      "translation_table": [
        {
          "field_a_term": "proton-motive force PMF = delta_psi - (RT/F) * delta_pH (biophysics)",
          "field_b_term": "electrochemical potential difference driving electrochemical work (thermodynamics)",
          "note": "PMF is the electrochemical gradient that powers the motor; it plays the role of voltage in an electromechanical transducer"
        },
        {
          "field_a_term": "stator protein MotA/MotB conducting protons (microbiology)",
          "field_b_term": "stator winding of electric motor converting current to torque (engineering)",
          "note": "Each stator unit is a proton channel that exchanges a proton for a discrete angular step, analogous to coil-winding torque generation"
        },
        {
          "field_a_term": "flagellar motor torque-speed curve (biophysics)",
          "field_b_term": "torque-speed characteristic of DC motor (electrical engineering)",
          "note": "The flagellar motor shows a linear torque-speed relationship analogous to the DC motor characteristic; stall torque ~ 1200 pN nm"
        },
        {
          "field_a_term": "flagellar motor stall efficiency near 100% (biophysics)",
          "field_b_term": "Carnot efficiency limit for reversible engine (thermodynamics)",
          "note": "Near stall, the motor operates reversibly (near thermodynamic equilibrium) approaching Carnot efficiency; at high speed efficiency drops"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rsif.2006.0133",
          "note": "Berg (2003) - The rotary motor of bacterial flagella; Annu Rev Biochem 72:19 — torque-speed and efficiency analysis"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-biology/b-flagellar-motor-x-rotary-engine.yaml"
    },
    {
      "id": "b-liquid-crystal-x-cell-membrane",
      "title": "Liquid crystals x Cell membranes — lipid bilayer as smectic-A phase\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The lipid bilayer cell membrane is a biological realization of a smectic-A liquid crystal; membrane fluidity, phase transitions (lipid rafts, gel-to-fluid transition), and curvature elasticity are all governed by the same Frank elastic energy that describes liquid crystal defects and alignments — ma",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Liquid crystal physicists and membrane biophysicists use identical mathematical frameworks (Frank elastic energy, order parameter fields) but publish in different journals; the field of active matter has begun bridging them, but quantitative cross-disciplinary experiments remain rare.\n",
      "translation_table": [
        {
          "field_a_term": "smectic-A layered order (liquid crystal physics)",
          "field_b_term": "lipid bilayer lamellar structure (cell biology)",
          "note": "The bilayer's two leaflets correspond to the smectic-A layers; amphiphile self-assembly is driven by the same hydrophobic packing that drives smectic order"
        },
        {
          "field_a_term": "Frank elastic constants K1, K2, K3 (liquid crystal physics)",
          "field_b_term": "Helfrich bending modulus kappa (membrane biophysics)",
          "note": "Membrane curvature elasticity is the biological analog of Frank elastic energy; kappa ~ 10-20 kBT is the bending rigidity"
        },
        {
          "field_a_term": "liquid crystal phase transition (Tc) (physics)",
          "field_b_term": "lipid raft formation / gel-fluid transition (cell biology)",
          "note": "Lipid phase separation into ordered (Lo) and disordered (Ld) domains mirrors liquid crystal isotropic-nematic transitions"
        },
        {
          "field_a_term": "topological defects in liquid crystal (physics)",
          "field_b_term": "membrane curvature singularities / buds (biology)",
          "note": "Defects in membrane lipid order correspond to sites of curvature generation (endocytosis, filopodia)"
        }
      ],
      "references": [
        {
          "doi": "10.1103/RevModPhys.46.617",
          "note": "de Gennes (1974) - The physics of liquid crystals; Rev Mod Phys 46:617"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-biology/b-liquid-crystal-x-cell-membrane.yaml"
    },
    {
      "id": "b-mechanobiology-cellular-force-sensing",
      "title": "Cells function as living force transducers — integrin-ECM adhesion clusters convert piconewton-scale mechanical loads into gene-expression programs via talin unfolding, YAP/TAZ nuclear translocation, and durotactic migration, making biophysics and cell biology inseparable accounts of the same mechanochemical signalling system.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Mechanobiology unifies soft-matter physics with cell biology by showing that cells actively sense, generate, and respond to mechanical forces across length scales from nanometres to tissues. The key physical mechanism is mechanotransduction — the conversion of mechanical signals into biochemical one",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ecm-stiffness-cancer-invasion-threshold"
      ],
      "communication_gap": "Mechanobiology sits at the intersection of soft-matter physics, biophysics, and cell biology — three communities with distinct journals (Physical Review Letters, Biophysical Journal, Cell) and funding silos (NSF Physics, NIH Cell Biology). Physicists model cells as active gels but rarely track specific signalling pathways. Cell biologists measure YAP/TAZ but rarely solve the elasticity equations. The field only cohered after Engler et al. (2006) and Dupont et al. (2011) made the mechanical→biochemical link experimentally clear. Cancer biology has been slow to incorporate stiffness as a first-class parameter alongside genetics.\n",
      "translation_table": [
        {
          "field_a_term": "elastic modulus E of substrate (Pa)",
          "field_b_term": "biochemical differentiation cue for stem cells",
          "note": "stiffness alone recapitulates lineage-specific soluble factor signals"
        },
        {
          "field_a_term": "piconewton force on talin rod domain",
          "field_b_term": "allosteric exposure of vinculin-binding site (mechanochemical switch)",
          "note": "F ~ 10 pN is the physiological operating range of talin as measured by FRET tension sensors"
        },
        {
          "field_a_term": "worm-like chain model of protein domain unfolding",
          "field_b_term": "mechanotransduction signal initiation at focal adhesions",
          "note": "same polymer physics framework applied to protein mechanics"
        },
        {
          "field_a_term": "elastic traction stress field (nPa scale)",
          "field_b_term": "cell migration directionality (durotaxis)",
          "note": "traction force microscopy measures the physical quantity that determines biological behaviour"
        },
        {
          "field_a_term": "active gel theory (non-equilibrium polar fluid with motor activity)",
          "field_b_term": "actin cytoskeleton organisation and stress fibre formation",
          "note": "actomyosin cortex is a paradigmatic active matter system"
        },
        {
          "field_a_term": "stiffness gradient in ECM",
          "field_b_term": "chemotaxis-analogue (durotaxis) driving cancer invasion",
          "note": "desmoplasia creates a mechanical gradient that cancer cells follow"
        },
        {
          "field_a_term": "Hookean spring constant k of focal adhesion-ECM composite",
          "field_b_term": "YAP/TAZ nuclear localisation and mechanosensing threshold",
          "note": "YAP/TAZ nuclear fraction follows a sigmoidal function of substrate stiffness"
        }
      ],
      "references": [
        {
          "doi": "10.1016/j.cell.2006.06.044",
          "note": "Engler et al. (2006) Cell 126:677 — substrate stiffness directs stem cell differentiation"
        },
        {
          "doi": "10.1126/science.260.5111.1124",
          "note": "Wang et al. (1993) Science 260:1124 — mechanical stress controls cell growth"
        },
        {
          "doi": "10.1038/nature10137",
          "note": "Dupont et al. (2011) Nature 474:179 — YAP/TAZ as transducers of matrix stiffness"
        },
        {
          "doi": "10.1016/j.tcb.2011.04.005",
          "note": "Trepat & Fredberg (2011) Trends Cell Biol 21:638 — cell mechanics and traction forces"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-biology/b-mechanobiology-cellular-force-sensing.yaml"
    },
    {
      "id": "b-mechanobiology-continuum-mechanics",
      "title": "Cells sense and respond to mechanical forces through mechanotransduction, and collectively exhibit a jamming phase transition (liquid-to-solid) controlled by cell shape index — making continuum mechanics (stress tensors, viscoelasticity, phase transitions) the quantitative framework for tissue biology from single-cell durotaxis to embryonic morphogenesis.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Tissues and cells obey continuum mechanics — the same mathematical framework (elasticity theory, fluid dynamics, statistical mechanics of phase transitions) that governs materials science. Key correspondences:\n1. CELL MECHANICS AS CONTINUUM ELASTICITY: A cell exerts traction forces on its\n   substra",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-durotaxis-cancer-metastasis"
      ],
      "communication_gap": "Continuum mechanics and materials science developed entirely separately from cell biology. The field of mechanobiology only emerged explicitly in the 2000s with Discher and Engler's experiments. The vertex model and jamming transition results (Bi et al. 2016) are from the physics literature and are not widely read by developmental biologists who study the same tissue flows using genetic rather than physical approaches. Bridging these communities requires shared experimental infrastructure (traction force microscopy, AFM) and shared mathematical language (continuum mechanics rather than molecular signalling pathway diagrams).\n",
      "translation_table": [
        {
          "field_a_term": "Stress tensor σ_ij",
          "field_b_term": "Traction forces exerted by cell on substrate",
          "note": "Measured by traction force microscopy (TFM): compute substrate displacement field, invert via Green's function"
        },
        {
          "field_a_term": "Young's modulus E (substrate stiffness)",
          "field_b_term": "Mechanosensed property that directs stem cell differentiation",
          "note": "Cells match their own stiffness to the substrate via actomyosin contractility feedback"
        },
        {
          "field_a_term": "Viscoelastic moduli E, η (Kelvin-Voigt model)",
          "field_b_term": "Tissue rheological parameters governing morphogenetic flows",
          "note": "Measurable by atomic force microscopy (AFM) indentation or optical magnetic twisting cytometry"
        },
        {
          "field_a_term": "Shape index q* = P/√A (dimensionless)",
          "field_b_term": "Order parameter for jamming transition in epithelial monolayers",
          "note": "q* < 3.81: rigid jammed tissue; q* > 3.81: flowing unjammed tissue; critical value from vertex model energy"
        },
        {
          "field_a_term": "Active polar/nematic order parameter",
          "field_b_term": "Collective cell migration direction in monolayer",
          "note": "Topological ±1/2 defects in the orientation field = sites of cell death/extrusion (+1/2) and division (-1/2)"
        },
        {
          "field_a_term": "Durotaxis",
          "field_b_term": "Cell migration directed by stiffness gradient ∇E",
          "note": "Physically: cell generates larger traction on stiffer substrate, net force directs migration up-gradient"
        },
        {
          "field_a_term": "Phase transition (jamming)",
          "field_b_term": "Epithelial-to-mesenchymal transition (EMT) in cancer invasion",
          "note": "EMT may be a mechanobiological unjamming transition: tumour cells above q* threshold become invasive"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1116995",
          "note": "Discher, Janmey & Wang (2005) Science 310:1139 — tissue cells feel and respond to the stiffness of their substrate"
        },
        {
          "doi": "10.1016/j.cell.2006.06.044",
          "note": "Engler et al. (2006) Cell 126:677 — matrix elasticity directs stem cell lineage specification"
        },
        {
          "doi": "10.1038/nphys3471",
          "note": "Bi et al. (2016) Nat Phys 12:1085 — motility-driven glass and jamming transitions in biological tissues"
        },
        {
          "doi": "10.1038/nphys2355",
          "note": "Trepat et al. (2009) Nat Phys 5:426 — physical forces during collective cell migration"
        },
        {
          "doi": "10.1103/RevModPhys.85.1143",
          "note": "Marchetti et al. (2013) Rev Mod Phys 85:1143 — hydrodynamics of soft active matter"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/physics-biology/b-mechanobiology-continuum-mechanics.yaml"
    },
    {
      "id": "b-mechanosensing-piezoelectric",
      "title": "Biological tissues (bone, collagen, DNA) exhibit piezoelectric properties bridging solid-state physics crystal mechanics to mechanobiology and Wolff's law of bone remodelling",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Piezoelectricity — the generation of electrical polarisation by mechanical stress and vice versa — appears in many biological tissues including bone, collagen, DNA, and some cell membranes. The piezoelectric tensor d_ijk relates stress σ_jk to polarisation P_i: P_i = d_ijk σ_jk. Bone exhibits measur",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-bone-piezoelectric-signal-drives-wolff-law-remodelling"
      ],
      "communication_gap": "Solid-state physicists working on piezoelectrics and biologists studying mechanosensing operate in separate communities. Clinical bone biology rarely applies piezoelectric tensor formalism. The biophysical field of mechanobiology has grown substantially but often lacks the mathematical rigour of solid-state physics treatment of piezoelectric phenomena.\n",
      "translation_table": [
        {
          "field_a_term": "piezoelectric tensor d_ijk",
          "field_b_term": "mechanosensitive coupling coefficient",
          "note": "Same mathematical object; biophysics uses it to relate mechanical input to biological signal output"
        },
        {
          "field_a_term": "crystal symmetry group (non-centrosymmetric)",
          "field_b_term": "structural anisotropy of collagen fibrils",
          "note": "Piezoelectricity requires broken inversion symmetry — collagen handedness provides this"
        },
        {
          "field_a_term": "streaming potential / piezoelectric voltage",
          "field_b_term": "mechanosensory signal to osteocytes",
          "note": "The electrical signal generated by mechanical loading is the proposed biological transducer"
        },
        {
          "field_a_term": "converse piezoelectric effect (E → strain)",
          "field_b_term": "electrically stimulated bone growth (clinical use)",
          "note": "Applied electric fields promote bone healing — converse piezo effect in biological tissue"
        }
      ],
      "references": [
        {
          "note": "Fukada & Yasuda (1957) — piezoelectric effect of bone",
          "doi": "10.1143/JPSJ.12.1158"
        },
        {
          "note": "Wolff (1892) Das Gesetz der Transformation der Knochen — bone remodelling law"
        },
        {
          "note": "Bassett & Becker (1962) — generation of electric potentials by bone in response to mechanical stress",
          "doi": "10.1126/science.137.3535.1063"
        },
        {
          "note": "Ahn & Grodzinsky (2009) — relevance of collagen piezoelectricity to wound healing",
          "doi": "10.1615/CritRevBiomedEng.v37.i1-2.20"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-biology/b-mechanosensing-piezoelectric.yaml"
    },
    {
      "id": "b-neurovascular-coupling-x-fluid-dynamics",
      "title": "Neurovascular coupling x Fluid dynamics - BOLD signal as Hagen-Poiseuille flow\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The BOLD fMRI signal arises from neurovascular coupling where neural activity triggers astrocyte-mediated vasodilation, increasing cerebral blood flow via Hagen-Poiseuille dynamics (Q proportional to r^4 delta P / eta L); the extreme r^4 radius sensitivity means small vessel diameter changes (5-10%)",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Neuroscientists developing fMRI analysis (Ogawa et al. 1990) and fluid dynamicists studying vascular flow built separate frameworks; the biophysical models of the BOLD response (Buxton et al. 1998, Friston et al. 2000) incorporate Hagen-Poiseuille mechanics but most neuroscientists use the HRF as a statistical model without deriving it from first principles — meaning fluid dynamic improvements (accounting for vessel geometry heterogeneity, turbulence at high flow) have not fully entered fMRI methodology.\n",
      "translation_table": [
        {
          "field_a_term": "BOLD signal change in fMRI (neuroscience measurement)",
          "field_b_term": "Hagen-Poiseuille flow rate change Q proportional to r^4 (fluid dynamics)",
          "note": "The nonlinear r^4 sensitivity amplifies small vessel diameter changes into large BOLD signals; 7% radius change gives 31% flow change"
        },
        {
          "field_a_term": "astrocyte end-feet mediating neurovascular coupling (neuroscience)",
          "field_b_term": "boundary condition control of fluid flow in cylindrical tube (fluid mechanics)",
          "note": "Astrocytes contact both neurons and blood vessels; they transduce neural activity into vasodilation via Ca2+ signaling - boundary condition control"
        },
        {
          "field_a_term": "cerebral blood flow (CBF) autoregulation (physiology)",
          "field_b_term": "flow regulation in pipe network with pressure-dependent resistance (fluid mechanics)",
          "note": "Cerebral autoregulation maintains constant CBF across blood pressure range; it maps to pressure-dependent tube compliance in Hagen-Poiseuille"
        },
        {
          "field_a_term": "hemodynamic response function (HRF) in fMRI analysis (neuroscience)",
          "field_b_term": "impulse response of a fluid dynamic low-pass filter (fluid mechanics)",
          "note": "The HRF is the fluid dynamic response of the cerebrovascular system to a neural impulse; its shape is determined by vascular compliance and viscosity"
        }
      ],
      "references": [
        {
          "doi": "10.1152/physrev.00061.2017",
          "note": "Attwell et al. (2017) - What is a pericyte? J Cereb Blood Flow Metab — neurovascular coupling and BOLD signal fluid dynamics"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-biology/b-neurovascular-coupling-x-fluid-dynamics.yaml"
    },
    {
      "id": "b-nonequilibrium-statistical-mechanics-metabolism",
      "title": "Biological metabolism operates as a far-from-equilibrium dissipative system governed by nonequilibrium statistical mechanics: the Jarzynski equality (e^{-βW} = e^{-βΔF}) connects work fluctuations in molecular machines to free energy differences, the fluctuation theorem quantifies entropy production in metabolic cycles, and Prigogine's minimum entropy production principle identifies the stable steady states of living systems.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Living systems maintain themselves far from thermodynamic equilibrium by continuously dissipating free energy (ATP hydrolysis: ΔG ≈ -54 kJ/mol under physiological conditions). Classical thermodynamics gives equilibrium states; nonequilibrium statistical mechanics (NESM) describes the statistics of f",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-jarzynski-equality-molecular-motor-efficiency-measurement"
      ],
      "communication_gap": "Nonequilibrium statistical mechanics developed within physics (Onsager, Prigogine, Jarzynski, Crooks) with publications in Physical Review Letters and Journal of Chemical Physics. Biochemists and cell biologists studying metabolism rarely engage with NESM literature, even though metabolism is paradigmatically the phenomenon NESM was designed to describe. The biophysics community (Bustamante, Liphardt) has bridged this gap for single molecules, but NESM has not penetrated cellular metabolism or systems biology at the quantitative level.\n",
      "translation_table": [
        {
          "field_a_term": "Jarzynski equality (e^{-βW} = e^{-βΔF})",
          "field_b_term": "free energy of biomolecular conformation change measurable from irreversible pulling",
          "note": "RNA hairpin unfolding at constant pulling speed is irreversible yet J-equality gives ΔG_fold"
        },
        {
          "field_a_term": "entropy production rate σ̇ = J·F ≥ 0",
          "field_b_term": "metabolic heat dissipation per unit time in living tissue",
          "note": "σ̇ is measurable by microcalorimetry; correlates with cell proliferation rate and cancer aggressiveness"
        },
        {
          "field_a_term": "Onsager kinetic coefficient L_{ij} (flux = L·force)",
          "field_b_term": "coupling between metabolic fluxes (oxidative phosphorylation efficiency = L_{12}/√(L_{11}L_{22}))",
          "note": "Mitochondrial P/O ratio (ATP produced per O₂ consumed) = Onsager coupling coefficient"
        },
        {
          "field_a_term": "Crooks fluctuation theorem (forward/reverse work ratio)",
          "field_b_term": "reversibility of molecular motor stepping — kinesin takes occasional backward steps",
          "note": "Ratio of forward to backward steps of kinesin at stall force = exp(F·d/k_BT) consistent with CFT"
        },
        {
          "field_a_term": "dissipative structure (Prigogine) — maintained by continuous energy flux",
          "field_b_term": "cell — maintained far from equilibrium by ATP hydrolysis; death = approach to equilibrium",
          "note": "Cell death = thermodynamic relaxation; the cell is a Prigogine dissipative structure"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.78.2690",
          "note": "Jarzynski (1997) Nonequilibrium equality for free energy differences. Phys Rev Lett 78:2690"
        },
        {
          "doi": "10.1103/PhysRevE.60.2721",
          "note": "Crooks (1999) Entropy production fluctuation theorem and the nonequilibrium work relation for free energy differences. Phys Rev E 60:2721"
        },
        {
          "doi": "10.1126/science.1058498",
          "note": "Liphardt et al. (2002) Equilibrium information from nonequilibrium measurements in an experimental test of Jarzynski's equality. Science 296:1832"
        },
        {
          "note": "Prigogine (1967) Introduction to Thermodynamics of Irreversible Processes. Interscience Publishers, 3rd edn"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-biology/b-nonequilibrium-statistical-mechanics-metabolism.yaml"
    },
    {
      "id": "b-optogenetics-x-control-theory",
      "title": "Optogenetics ↔ Control theory — light-gated channels as actuators",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Optogenetic tools (channelrhodopsins, halorhodopsins) implement real-time feedback control of neural circuits; light pulses are control inputs, spike rates are controlled outputs, and closed-loop optogenetic stimulation implements proportional-integral control of neural dynamics — connecting neurote",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-optogenetics-x-control-theory"
      ],
      "communication_gap": "Optogenetics was developed by Deisseroth and Boyden as a neuroscience tool (Nature Methods 2005) without reference to control engineering. Control engineers studying biological systems (synthetic biology feedback loops, insulin pumps) rarely collaborated with neuroscientists using optogenetics. The closed-loop optogenetics framework explicitly importing control theory was developed only around 2012-2016 (Grosenick, Boyden group; Foutz & McIntyre), leaving a decade of missed engineering optimisation opportunities.",
      "translation_table": [
        {
          "field_a_term": "channelrhodopsin-2 (ChR2) light-gated cation channel",
          "field_b_term": "actuator element in feedback control loop (increases plant output)",
          "note": "ChR2 activation increases neuron firing — excitatory actuator; NpHR = inhibitory actuator"
        },
        {
          "field_a_term": "closed-loop optogenetics (spike rate measured, light adjusted in real-time)",
          "field_b_term": "proportional-integral (PI) feedback control loop",
          "note": "P: light proportional to error; I: integrated error prevents steady-state offset"
        },
        {
          "field_a_term": "light pulse frequency and duty cycle (control signal parameters)",
          "field_b_term": "control input u(t) in continuous-time state space model",
          "note": "Channelrhodopsin kinetics set the bandwidth; saturation gives nonlinear actuator model"
        },
        {
          "field_a_term": "neural spike rate (controlled variable being maintained at setpoint)",
          "field_b_term": "plant output y(t) in closed-loop controller",
          "note": "Spike rate setpoint corresponds to reference signal r(t) in control engineering"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nn.2512",
          "note": "Boyden et al. (2005) — millisecond-timescale, genetically targeted optical control of neural activity; Nature Neurosci"
        },
        {
          "doi": "10.1038/nn.3257",
          "note": "Grosenick et al. (2015) — closed-loop and activity-guided optogenetic control; Neuron 86:106"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-biology/b-optogenetics-x-control-theory.yaml"
    },
    {
      "id": "b-osmosis-cell-volume-regulation",
      "title": "The van't Hoff osmotic pressure equation and aquaporin water channels connect thermodynamic solute-concentration physics to cell volume regulation, linking passive membrane transport physics with the active ion-cotransporter machinery (KCC, NKCC) that cells use to survive osmotic stress.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Van't Hoff's 1887 equation π = iMRT establishes that osmotic pressure across a semipermeable membrane is a colligative thermodynamic quantity determined entirely by solute concentration — a purely physical result. Cells translate this into life-or-death volume regulation: hyposmotic stress causes sw",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-aqp2-trafficking-as-osmotic-valve"
      ],
      "communication_gap": "Physical chemists who work on membrane transport rarely read Cell Physiology journals; cell biologists who study RVD/RVI rarely consult the thermodynamics literature on osmotic pressure. The quantitative connection between the van't Hoff equation and cotransporter set-points is not taught in either physics or biology curricula. Agre's Nobel lecture explicitly noted that physicists and physiologists had been studying \"the same thing\" for decades without speaking to one another.\n",
      "translation_table": [
        {
          "field_a_term": "osmotic pressure π = iMRT",
          "field_b_term": "oncotic pressure driving cell swelling/shrinkage",
          "note": "The van't Hoff equation quantifies the physical driving force that cells must counteract"
        },
        {
          "field_a_term": "semipermeable membrane (thermodynamic idealization)",
          "field_b_term": "plasma membrane with selective ion channels and AQPs",
          "note": "Real biological membranes are selectively permeable via protein channels, not ideally semipermeable"
        },
        {
          "field_a_term": "water flux J_w = L_p·Δπ (hydraulic conductivity)",
          "field_b_term": "aquaporin channel density × single-channel P_f",
          "note": "Macroscopic hydraulic conductivity is the product of molecular-scale AQP abundance"
        },
        {
          "field_a_term": "equilibrium (Δπ = 0)",
          "field_b_term": "isosmotic steady state maintained by active transport",
          "note": "Cells never reach thermodynamic equilibrium — they maintain a non-equilibrium steady state via ion pumps"
        },
        {
          "field_a_term": "osmolality (mol/kg solvent)",
          "field_b_term": "intracellular vs. extracellular tonicity",
          "note": "Cells regulate their internal osmolality via compatible osmolytes (taurine, betaine) independently of volume"
        },
        {
          "field_a_term": "Gibbs free energy of water ΔG = RT ln(a_w)",
          "field_b_term": "water potential driving absorption in kidney tubules",
          "note": "ADH/AQP2 regulation tunes the effective hydraulic permeability to match physiological demand"
        }
      ],
      "references": [
        {
          "note": "van't Hoff (1887) — original osmotic pressure equation",
          "doi": "10.1515/zpch-1887-0142"
        },
        {
          "note": "Agre et al. (2002) — aquaporin structure and function review",
          "doi": "10.1016/S0959-440X(02)00323-2"
        },
        {
          "note": "Hoffmann et al. (2009) — cell volume regulation (RVD/RVI) comprehensive review",
          "doi": "10.1152/physrev.90037.2008"
        },
        {
          "note": "Strange (2004) — cell volume regulation overview",
          "doi": "10.1007/s00232-003-0637-2"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-biology/b-osmosis-cell-volume-regulation.yaml"
    },
    {
      "id": "b-vicsek-active-matter-flocking",
      "title": "The Vicsek model demonstrates that local velocity alignment among self-propelled particles spontaneously generates long-range orientational order in 2D, explaining collective motion in bird flocks, fish schools, and bacterial swarms through a minimal active matter model",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "N self-propelled particles with speed v0 aligning with neighbors within radius r undergo a continuous noise-driven phase transition at critical noise eta_c from a disordered gas phase (no net motion) to a polar ordered phase (coherent flock), with the order parameter phi = |sum(v_i)|/(N*v0) serving ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Physicists study active matter phase transitions theoretically while biologists measure collective animal behavior empirically; quantitative comparison requires translating biophysical parameters (alignment strength, noise) into measurable behavioral metrics (nearest-neighbor correlation, speed distribution).",
      "translation_table": [
        {
          "field_a_term": "local velocity alignment rule theta_i(t+1) = <theta_j>_{|r_ij|<r} + noise",
          "field_b_term": "ferromagnetic spin alignment in XY model without detailed balance",
          "note": "Unlike equilibrium XY model, Vicsek particles self-propel, breaking time-reversal symmetry and allowing long-range order in 2D"
        },
        {
          "field_a_term": "noise parameter eta (angular deviation)",
          "field_b_term": "effective temperature in non-equilibrium system",
          "note": "Increasing eta destroys order; the eta_c transition is discontinuous (first-order) at large N, debated at small N"
        },
        {
          "field_a_term": "starling murmuration long-range correlations",
          "field_b_term": "scale-free correlation length xi >> r (interaction range)",
          "note": "Empirical correlations in starling flocks extend over the entire flock; consistent with proximity to criticality"
        },
        {
          "field_a_term": "bacterial collective motion (E. coli swarms)",
          "field_b_term": "Toner-Tu hydrodynamic theory for polar active fluids",
          "note": "Toner-Tu equations add advective nonlinearity to XY hydrodynamics; predict unique anomalous scaling exponents"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.75.1226",
          "note": "Vicsek et al. (1995) PRL - original Vicsek model paper demonstrating phase transition in collective motion"
        },
        {
          "doi": "10.1103/PhysRevLett.75.4326",
          "note": "Toner & Tu (1995) PRL - hydrodynamic theory of flocking: Toner-Tu equations"
        },
        {
          "doi": "10.1126/science.1215776",
          "note": "Cavagna et al. (2010) PNAS - scale-free correlations in starling flocks empirically measured"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-biology/b-vicsek-active-matter-flocking.yaml"
    },
    {
      "id": "b-casimir-polder-retardation-x-lifshitz-vdw-crossover",
      "title": "Casimir–Polder forces between polarizable atoms interpolate between nonretarded van der Waals (∝ R⁻⁶) and retarded (∝ R⁻⁷) power laws as electromagnetic retardation grows with separation — unified macroscopically by Lifshitz theory where frequency-dependent ε(ω) bridges short-range van der Waals and macroscopic Casimir pressures across material interfaces.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Microscopic London dispersion merges into continuum Lifshitz/Casimir descriptions when multipolar fluctuations are integrated with proper causal Green functions — distance regimes distinguish **Casimir** as macroscopic QED boundary phenomenon versus **vdW** as molecular terminology — physically one ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-joint-fit-lifshitz-hamaker-colloid-force-curves"
      ],
      "communication_gap": "Chemistry curricula emphasize pairwise Lennard-Jones tails while condensed-matter physics emphasizes Lifshitz/Casimir plates — graduate students may not realize retardation and screening unify within one spectral formalism.\n",
      "translation_table": [
        {
          "field_a_term": "Casimir–Polder pair potential between neutral atoms (retardation crossover)",
          "field_b_term": "distance regimes of London dispersion curves in molecular simulations",
          "note": "Same fluctuation origin; simulations often splice empirical potentials rather than full frequency integrals."
        },
        {
          "field_a_term": "Lifshitz integral over imaginary Matsubara frequencies ξ_n",
          "field_b_term": "Hamaker-style coarse-graining of pairwise dispersion between mesoscale bodies",
          "note": "Lifshitz reduces to Hamaker constants in dilute limits with modeling caveats."
        },
        {
          "field_a_term": "Casimir pressure between conducting plates in vacuum",
          "field_b_term": "solvation/London interactions screened by electrolytes in chemistry contexts",
          "note": "Screening and aqueous environments dramatically alter spectral ε(ω) paths — bridge highlights vacuum/clarified-limit pedagogy first."
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRev.73.360",
          "note": "Casimir & Polder (1948) — retardation influence on London–van der Waals forces"
        },
        {
          "doi": "10.1103/PhysRevLett.118.266802",
          "note": "Venkataram et al. (2017) — unifying microscopic and continuum vdW/Casimir treatments (PRL)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-chemistry/b-casimir-polder-retardation-x-lifshitz-vdw-crossover.yaml"
    },
    {
      "id": "b-catalytic-converter-surface-chemistry",
      "title": "The automotive catalytic converter is a physical chemistry masterpiece: Pt/Pd/Rh on alumina support simultaneously catalyzes three reactions via Langmuir-Hinshelwood surface chemistry, controlled within ±0.02 air-fuel ratio λ=1 by oxygen sensor feedback.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The three-way catalytic converter (TWC) bridges gas-phase thermodynamics (engine exhaust chemistry) and surface science (heterogeneous catalysis). The three simultaneous reactions: (1) CO oxidation: 2CO + O₂ → 2CO₂; (2) hydrocarbon oxidation: CₓHᵧ + O₂ → CO₂ + H₂O; (3) NOₓ reduction: 2NO → N₂ + O₂. ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-electric-catalyst-preheating-eliminates-cold-start-emissions"
      ],
      "communication_gap": "Automotive engineers who design TWC systems optimize empirically (lambda sweeps, aging protocols) without deep engagement with the surface science literature. Surface scientists studying CO oxidation on Pt single crystals under UHV rarely interface with the materials science of washcoat aging or the control systems engineering of lambda feedback loops.\n",
      "translation_table": [
        {
          "field_a_term": "lambda (λ) air-fuel equivalence ratio",
          "field_b_term": "surface oxygen coverage (chemical potential of adsorbed oxygen)",
          "note": "λ controls the balance between oxidizing and reducing surface conditions"
        },
        {
          "field_a_term": "Pt/Pd oxidation catalysis",
          "field_b_term": "Langmuir-Hinshelwood mechanism for CO* + O* → CO₂",
          "note": "Langmuir adsorption isotherm describes competitive CO and O adsorption on Pt"
        },
        {
          "field_a_term": "Rh NOₓ reduction",
          "field_b_term": "dissociative adsorption of NO → N* + O*; N* + N* → N₂",
          "note": "Rh uniquely stabilizes N* intermediate, enabling N-N coupling"
        },
        {
          "field_a_term": "light-off temperature T₅₀",
          "field_b_term": "Arrhenius activation energy for surface reaction becoming rate-limiting",
          "note": "cold-start problem is an activation energy problem; electric pre-heating solutions"
        },
        {
          "field_a_term": "sintering of PGM nanoparticles",
          "field_b_term": "Ostwald ripening (surface energy-driven coarsening)",
          "note": "loss of active surface area (BET) = catalyst deactivation"
        }
      ],
      "references": [
        {
          "note": "Heck & Farrauto (2002) Catalytic Air Pollution Control, 2nd ed.; Van Nostrand Reinhold"
        },
        {
          "doi": "10.1080/01614949408009465",
          "note": "Shelef & Graham (1994) Why rhodium in automotive three-way catalysts?; Catal Rev 36:433"
        },
        {
          "note": "Ertl et al. (1997) Handbook of Heterogeneous Catalysis; Wiley-VCH"
        },
        {
          "note": "Taylor (1993) Automobile catalytic converters; Catal Today 16:21"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-chemistry/b-catalytic-converter-surface-chemistry.yaml"
    },
    {
      "id": "b-electrochemical-energy-storage-conversion",
      "title": "Electrochemical energy devices — fuel cells, electrolyzers, and redox flow batteries — bridge electrochemistry and thermodynamics: the Gibbs free energy change ΔG = -nFE determines theoretical efficiency, while Butler-Volmer kinetics and Ohmic losses set practical limits, unifying chemical reaction thermodynamics with electrical energy conversion.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Fuel cells convert chemical energy directly to electrical energy via electrochemical reactions, bypassing the Carnot efficiency limit that constrains heat engines. For the hydrogen fuel cell: H₂ + ½O₂ → H₂O, ΔG° = -237.1 kJ/mol. The maximum thermodynamic efficiency is η_max = ΔG/ΔH = 237.1/285.8 = 8",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-single-atom-catalyst-orr-selectivity-4e"
      ],
      "communication_gap": "Thermodynamics (19th century, Carnot-Kelvin-Gibbs tradition) and electrochemistry (Faraday-Nernst tradition) developed largely independently. Physical chemistry courses teach Nernst equation without connecting it to Butler-Volmer kinetics, which appears in electrochemical engineering courses. Materials scientists developing catalysts rarely engage with the thermodynamic efficiency framework; energy economists modeling hydrogen supply chains rarely understand the catalyst kinetics that constrain practical efficiency. The hydrogen economy discourse spans physics, chemistry, materials science, and economics — rarely synthesized in a single analytical framework.\n",
      "translation_table": [
        {
          "field_a_term": "Gibbs free energy change ΔG (chemical thermodynamics)",
          "field_b_term": "maximum electrical work W_max = nFE (electrochemistry)",
          "note": "ΔG = -nFE is the fundamental equation linking thermodynamics and electrochemistry"
        },
        {
          "field_a_term": "enthalpy of reaction ΔH (total heat content)",
          "field_b_term": "higher heating value (HHV) of fuel (total chemical energy content)",
          "note": "efficiency η = ΔG/ΔH; fuel cell is more efficient than combustion because ΔG < ΔH"
        },
        {
          "field_a_term": "electrode overpotential η (kinetic penalty)",
          "field_b_term": "internal resistance voltage drop (circuit analogy)",
          "note": "Butler-Volmer η reduces E from E° = 1.23V to ~0.7V at practical current densities"
        },
        {
          "field_a_term": "Nernst equation (equilibrium potential vs. concentration)",
          "field_b_term": "open-circuit voltage (OCV) of battery at given state of charge",
          "note": "both express thermodynamic driving force as function of species activities"
        },
        {
          "field_a_term": "exchange current density j₀ (Butler-Volmer parameter)",
          "field_b_term": "electrocatalyst activity (Tafel slope, turnover frequency)",
          "note": "j₀ encodes catalyst quality; Pt has high j₀ for HOR, poor for ORR — drives Pt-alloy research"
        }
      ],
      "references": [
        {
          "doi": "10.1080/14786443908562514",
          "note": "Grove (1839) On voltaic series and the combination of gases by platinum; Philos Mag 14:127"
        },
        {
          "note": "Appleby & Foulkes (1989) Fuel Cell Handbook. Van Nostrand Reinhold"
        },
        {
          "doi": "10.1149/1.3599565",
          "note": "Skyllas-Kazacos et al. (2011) Progress in flow battery research and development; J Electrochem Soc 158:R55"
        },
        {
          "doi": "10.1126/science.1103197",
          "note": "Turner (2004) Sustainable hydrogen production; Science 305:972"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-chemistry/b-electrochemical-energy-storage-conversion.yaml"
    },
    {
      "id": "b-kramers-escape-rate-x-drift-diffusion-decision-threshold",
      "title": "Kramers escape over an activation barrier and drift-diffusion decision thresholds share a first-passage-time structure: noisy trajectories accumulate evidence or thermal energy until they cross a boundary, producing reaction-time or rate distributions.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "This is a transfer analogy at the stochastic-process level, not a claim that cognitive decisions are chemical reactions. Barrier height, noise scale, and drift map onto threshold, sensory noise, and evidence accumulation in ways that can be tested through first-passage-time distributions.\n",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-reaction-time-tail-scales-with-effective-barrier-height"
      ],
      "communication_gap": "Chemical kinetics focuses on rates and barriers, while cognitive neuroscience emphasizes accuracy/reaction-time tradeoffs and fitted drift-diffusion parameters.\n",
      "translation_table": [
        {
          "field_a_term": "activation barrier height",
          "field_b_term": "decision threshold separation",
          "note": "Both set first-passage difficulty."
        },
        {
          "field_a_term": "thermal noise",
          "field_b_term": "sensory or internal decision noise",
          "note": "Noise scale shapes crossing times."
        },
        {
          "field_a_term": "escape-time distribution",
          "field_b_term": "reaction-time tail distribution",
          "note": "The tail is the testable bridge."
        }
      ],
      "references": [
        {
          "doi": "10.1016/S0031-8914(40)90098-2",
          "note": "Kramers (1940) Brownian motion in a field of force and reaction-rate model."
        },
        {
          "doi": "10.1037/0033-295X.85.2.59",
          "note": "Ratcliff (1978) diffusion model for reaction time."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-chemistry/b-kramers-escape-rate-x-drift-diffusion-decision-threshold.yaml"
    },
    {
      "id": "b-maxwell-boltzmann-chemical-kinetics",
      "title": "The Maxwell-Boltzmann speed distribution determines the fraction of molecules energetic enough to overcome activation barriers, directly deriving the Arrhenius equation and establishing statistical mechanics as the microscopic foundation of chemical kinetics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Maxwell-Boltzmann distribution f(v) = 4π(m/2πkT)^(3/2) v² exp(-mv²/2kT) gives the probability that a molecule has speed v at temperature T. For a reaction to occur, the collision must supply at least the activation energy E_a. The fraction of molecules exceeding E_a is ∝ exp(-E_a/kT), which is e",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-activation-energy-mb-tail-universality"
      ],
      "communication_gap": "Physical chemistry curricula cover both the Maxwell-Boltzmann distribution and Arrhenius kinetics, but the derivation of one from the other is often omitted. Statistical mechanics courses treat f(v) as a physics result; kinetics courses treat the Arrhenius equation as an empirical law. The explicit derivation — that every Arrhenius rate constant is an integral over the Maxwell-Boltzmann tail — belongs in both curricula but appears in neither as a highlighted bridge.\n",
      "translation_table": [
        {
          "field_a_term": "Maxwell-Boltzmann distribution f(v)",
          "field_b_term": "collision energy distribution",
          "note": "The speed distribution maps to a collision energy distribution via E = ½mv²"
        },
        {
          "field_a_term": "Boltzmann factor exp(-E/kT)",
          "field_b_term": "Arrhenius exponential exp(-E_a/RT)",
          "note": "R = N_A · k; the Boltzmann factor is the single-molecule form of the Arrhenius factor"
        },
        {
          "field_a_term": "thermal equilibrium temperature T",
          "field_b_term": "reaction rate constant k(T)",
          "note": "Temperature controls both the width of f(v) and the rate constant via the same exp(-E/kT)"
        },
        {
          "field_a_term": "partition function Z",
          "field_b_term": "pre-exponential factor A",
          "note": "In TST, A comes from the ratio of partition functions of transition state to reactants"
        },
        {
          "field_a_term": "phase-space density",
          "field_b_term": "transition state ensemble",
          "note": "The transition state is the saddle point of the free-energy landscape — a phase-space dividing surface"
        }
      ],
      "references": [
        {
          "doi": "10.1039/tf9353100875",
          "note": "Evans & Polanyi (1935) — transition state theory, deriving k = (kT/h) exp(-ΔG‡/RT)"
        },
        {
          "note": "Maxwell, J.C. (1860). Illustrations of the dynamical theory of gases. Phil. Mag. 19:19–32. -- Original Maxwell speed distribution derivation"
        },
        {
          "note": "Arrhenius, S. (1889). Über die Reaktionsgeschwindigkeit bei der Inversion von Rohrzucker. Z. Phys. Chem. 4:226–248. -- Empirical Arrhenius equation"
        },
        {
          "doi": "10.1063/1.1749604",
          "note": "Eyring (1935) J. Chem. Phys. 3:107 — absolute reaction rate theory"
        },
        {
          "note": "Atkins, P. & de Paula, J. (2010). Physical Chemistry (9th ed.). Oxford University Press. -- Textbook bridge between statistical mechanics and chemical kinetics"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-chemistry/b-maxwell-boltzmann-chemical-kinetics.yaml"
    },
    {
      "id": "b-nmr-quantum-coherence",
      "title": "Nuclear magnetic resonance is quantum coherence engineering at room temperature — the Bloch equations describe spin dynamics, Fourier transform spectroscopy extracts chemical structure, and 2D NMR correlation experiments exploit many-body quantum coherence to determine protein structures, making NMR the applied science where quantum mechanics became a routine analytical tool.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "NMR spectroscopy is the most successful application of quantum coherence in chemistry, underpinning both structural determination of molecules and MRI in medicine. Its physical basis is the manipulation of nuclear spin quantum states.\nPhysical foundation: nuclei with non-zero spin I (¹H: I=½, ¹³C: I",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-solid-state-nmr-amyloid-structure-mechanism"
      ],
      "communication_gap": "NMR developed in physics departments (Bloch at Stanford, Purcell at Harvard, both Nobel 1952) but migrated entirely to chemistry and medicine within a decade. The quantum mechanical density matrix description of NMR (Abragam 1961, Ernst's 1987 textbook) is standard in NMR spectroscopy but rarely taught in physics quantum mechanics courses, creating a gap where physicists approaching NMR must re-learn their own quantum mechanics in unfamiliar notation. MRI is clinically indispensable (>100 million scans/year globally) but the physics and quantum engineering behind it are largely invisible to radiologists and clinicians. The 2003 Nobel Prize to Lauterbur and Mansfield specifically acknowledged the medical impact of the physics-to-chemistry-to-medicine translation that NMR represents.\n",
      "translation_table": [
        {
          "field_a_term": "nuclear spin quantum state |m⟩ (I=½: |α⟩, |β⟩)",
          "field_b_term": "chemical shift resonance line in NMR spectrum",
          "note": "energy gap between states = Larmor frequency = chemical shift position in spectrum"
        },
        {
          "field_a_term": "T₁ longitudinal relaxation (spin-lattice)",
          "field_b_term": "recovery time after perturbation (determines pulse repetition rate)",
          "note": "T₁ ranges from milliseconds (small molecules) to seconds (large proteins)"
        },
        {
          "field_a_term": "T₂ transverse relaxation (spin-spin dephasing)",
          "field_b_term": "linewidth of NMR resonance (Δν = 1/πT₂)",
          "note": "large molecules tumble slowly → fast T₂ relaxation → broad lines → size limit for NMR"
        },
        {
          "field_a_term": "Hahn spin echo (180° refocusing pulse)",
          "field_b_term": "T₂ measurement free of B₀ inhomogeneity contributions",
          "note": "original quantum coherence refocusing — prototype for all refocusing in quantum information"
        },
        {
          "field_a_term": "density matrix coherence ρ_{mn} off-diagonal element",
          "field_b_term": "observable NMR signal (magnetisation rotating at Larmor frequency)",
          "note": "only single-quantum coherences (Δm=1) are directly observable; multi-quantum coherences used in 2D experiments"
        },
        {
          "field_a_term": "NOE (through-space, r⁻⁶ distance dependence)",
          "field_b_term": "distance restraints in 3D protein structure determination",
          "note": "NOESY cross-peaks give r < 5Å between proton pairs → structural restraints"
        },
        {
          "field_a_term": "gradient field G encoding spatial position as frequency",
          "field_b_term": "k-space Fourier encoding in MRI",
          "note": "same Fourier relationship as spectroscopy but over centimetre scale"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRev.70.460",
          "note": "Bloch et al. (1946) Phys Rev 70:460 — Bloch equations for NMR spin dynamics"
        },
        {
          "doi": "10.1103/PhysRev.80.580",
          "note": "Hahn (1950) Phys Rev 80:580 — spin echo discovery"
        },
        {
          "note": "Ernst, Bodenhausen & Wokaun (1987) Principles of NMR in One and Two Dimensions. Oxford University Press"
        },
        {
          "note": "Wüthrich (1986) NMR of Proteins and Nucleic Acids. Wiley-Interscience"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-chemistry/b-nmr-quantum-coherence.yaml"
    },
    {
      "id": "b-reaction-rate-transition-state",
      "title": "Eyring-Evans-Polanyi transition state theory (1935) derives reaction rate k = (k_BT/h)exp(-ΔG‡/RT) from statistical mechanics; Kramers' theory adds solvent friction (γ); Marcus theory gives the celebrated inverted region k ∝ exp[-(λ+ΔG°)²/4λk_BT] for electron transfer where faster thermodynamics can slow the rate — unifying statistical mechanics, chemical kinetics, and quantum tunneling through the concept of a rate-limiting transition state.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Transition state theory (TST, Eyring-Evans-Polanyi 1935): reaction rate is k = (k_BT/h) · (Q‡/Q_R) · exp(-E‡/k_BT) where Q‡ is the partition function of the activated complex minus one degree of freedom (the reaction coordinate). Using free energies: k = (k_BT/h)exp(-ΔG‡/RT). Key assumptions: (1) qu",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-marcus-inverted-region-biological-electron-transfer"
      ],
      "communication_gap": "Transition state theory is taught in physical chemistry; Kramers theory in chemical physics; Marcus theory in electrochemistry and biophysics. These communities use different notation and rarely cross-cite. The connection of Marcus theory to the spin-boson model (condensed matter physics) is known to theorists but not to experimental chemists. Quantum biology connections (tunneling in enzymes) are highly contested.\n",
      "translation_table": [
        {
          "field_a_term": "transition state (activated complex, ‡) on the PES",
          "field_b_term": "saddle point of the potential energy landscape in statistical mechanics",
          "note": "transition state is a first-order saddle point in 3N-dimensional configuration space"
        },
        {
          "field_a_term": "activation free energy ΔG‡",
          "field_b_term": "barrier height in the Kramers escape problem",
          "note": "both determine the Arrhenius exponential suppression of the rate"
        },
        {
          "field_a_term": "solvent friction γ in Kramers theory",
          "field_b_term": "damping in the Langevin equation mẍ = -γẋ - dV/dx + ξ(t)",
          "note": "high friction → diffusion limit; low friction → energy-diffusion limited (Kramers turnover)"
        },
        {
          "field_a_term": "Marcus reorganization energy λ",
          "field_b_term": "harmonic bath coupling parameter in spin-boson model",
          "note": "λ measures how much the environment must rearrange upon charge transfer"
        },
        {
          "field_a_term": "Marcus inverted region (rate decreases at large -ΔG°)",
          "field_b_term": "quantum tunneling gap between initial and final state wavefunctions",
          "note": "deep inverted region rates are governed by nuclear tunneling, not classical over-barrier"
        }
      ],
      "references": [
        {
          "doi": "10.1063/1.1749604",
          "note": "Eyring (1935) The activated complex in chemical reactions. J Chem Phys 3:107–115"
        },
        {
          "doi": "10.1016/S0031-8914(40)90098-2",
          "note": "Kramers (1940) Brownian motion in a field of force and the diffusion model of chemical reactions. Physica 7:284–304"
        },
        {
          "doi": "10.1063/1.1742723",
          "note": "Marcus (1956) On the theory of oxidation-reduction reactions involving electron transfer. J Chem Phys 24:966–978"
        },
        {
          "doi": "10.1103/RevModPhys.62.251",
          "note": "Hänggi et al. (1990) Reaction-rate theory: fifty years after Kramers. Rev Mod Phys 62:251–341"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-chemistry/b-reaction-rate-transition-state.yaml"
    },
    {
      "id": "b-statistical-thermodynamics-equilibrium",
      "title": "Chemical equilibrium (K = exp(-ΔG°/RT)) is derived entirely from statistical thermodynamics: the equilibrium constant equals the ratio of molecular partition functions of products to reactants, making all of macroscopic chemical equilibrium a direct consequence of quantum mechanical energy level statistics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The equilibrium constant K = exp(-ΔG°/RT) derived from statistical thermodynamics: K = Z_products/Z_reactants where Z = Σ_i exp(-E_i/kT) is the molecular partition function summing over all quantum states. For ideal gases, Z = Z_trans × Z_rot × Z_vib × Z_elec — each factor derived from quantum mecha",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-statistical-thermodynamics-equilibrium-partition-function"
      ],
      "communication_gap": "Physical chemistry textbooks teach statistical thermodynamics in separate chapters from chemical equilibrium, obscuring the fact that K is a direct ratio of partition functions. Chemists using K rarely think about quantum energy levels; physicists studying partition functions rarely think about chemical applications.\n",
      "translation_table": [
        {
          "field_a_term": "partition function Z = Σ exp(-E_i/kT)",
          "field_b_term": "equilibrium constant K (ratio of Z_products/Z_reactants)",
          "note": "K is dimensionless ratio of product to reactant partition functions"
        },
        {
          "field_a_term": "Boltzmann factor exp(-E_i/kT)",
          "field_b_term": "relative population of reactant vs product states",
          "note": "at equilibrium, populations are Boltzmann-weighted"
        },
        {
          "field_a_term": "temperature T (thermal energy scale)",
          "field_b_term": "thermal energy scale RT in the van't Hoff relation ΔG° = -RT lnK",
          "note": "k_B and R differ only by Avogadro's number"
        },
        {
          "field_a_term": "quantum energy levels E_i of molecule",
          "field_b_term": "thermochemical tables (ΔH°_f, ΔG°_f, S°)",
          "note": "NIST Webbook tabulates partition-function-derived thermochemical quantities"
        }
      ],
      "references": [
        {
          "note": "Boltzmann (1877) — Boltzmann distribution; entropy as log of microstates"
        },
        {
          "note": "Gibbs (1902) Elementary Principles in Statistical Mechanics; Yale University Press"
        },
        {
          "note": "Atkins & de Paula (2010) Physical Chemistry; Oxford University Press"
        },
        {
          "note": "McQuarrie (2000) Statistical Mechanics; University Science Books"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-chemistry/b-statistical-thermodynamics-equilibrium.yaml"
    },
    {
      "id": "b-superconductivity-cooper-pairs",
      "title": "BCS theory unifies quantum mechanics and condensed-matter chemistry — phonon-mediated electron pairing overcomes Coulomb repulsion to produce macroscopic quantum coherence",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Bardeen-Cooper-Schrieffer (BCS) theory demonstrates a profound physics-chemistry bridge: electrons near the Fermi surface — despite their mutual Coulomb repulsion — can form bound Cooper pairs via phonon-mediated lattice distortions. The gap equation Δ ≈ 2ℏω_D·exp(-1/N(0)V) predicts both the ene",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-cuprate-pairing-spin-fluctuation-glue"
      ],
      "communication_gap": "Condensed matter physicists and solid-state chemists use different language for the same phenomena: physicists describe pairing in terms of Green's functions and self-energies; chemists use crystal field theory and molecular orbital diagrams. High-T_c research sits at this boundary — synthesis is done by chemists, theory by physicists — and the two communities have largely separate conferences (APS March Meeting vs. Materials Research Society). The pairing mechanism debate is prolonged partly by this community split.\n",
      "translation_table": [
        {
          "field_a_term": "Cooper pair (physics)",
          "field_b_term": "chemical bond (chemistry)",
          "note": "Both are two-electron states stabilized by an effective attractive interaction despite net Coulomb cost"
        },
        {
          "field_a_term": "phonon-mediated attraction N(0)V",
          "field_b_term": "electron-phonon coupling constant (vibrational spectroscopy)",
          "note": "The same lattice dynamics measured by IR/Raman spectroscopy sets T_c in conventional superconductors"
        },
        {
          "field_a_term": "superconducting energy gap Δ",
          "field_b_term": "HOMO-LUMO gap (molecular orbital theory)",
          "note": "Both represent the minimum energy cost to break a paired electron state"
        },
        {
          "field_a_term": "Meissner effect (perfect diamagnetism)",
          "field_b_term": "diamagnetic chemical shielding (NMR)",
          "note": "Both arise from induced screening currents, but Meissner is macroscopic and involves flux expulsion"
        },
        {
          "field_a_term": "condensate wavefunction ψ = |ψ|·e^{iφ}",
          "field_b_term": "molecular orbital (quantum chemistry)",
          "note": "Macroscopic quantum wavefunction vs. single-electron MO — both described by same Schrödinger formalism"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRev.108.1175",
          "note": "Bardeen, Cooper & Schrieffer (1957). Theory of Superconductivity. Phys Rev 108:1175."
        },
        {
          "doi": "10.1098/rspa.1935.0048",
          "note": "London & London (1935). The electromagnetic equations of the supraconductor. Proc R Soc A 149:71."
        },
        {
          "doi": "10.1007/BF01303701",
          "note": "Bednorz & Müller (1986). Possible high-Tc superconductivity in the Ba-La-Cu-O system. Z Phys B 64:189."
        },
        {
          "doi": "10.1126/science.1200181",
          "note": "Norman (2011). The challenge of unconventional superconductivity. Science 332:196."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-chemistry/b-superconductivity-cooper-pairs.yaml"
    },
    {
      "id": "b-transition-state-theory-kinetics",
      "title": "Transition state theory (Eyring 1935) and Kramers' escape rate (1940) unify chemical reaction kinetics, protein conformational dynamics, and ion channel gating as thermally activated first-passage over energy barriers",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Transition state theory (Eyring, Evans & Polanyi 1935) describes chemical reactions as passage over a saddle point on the potential energy surface (PES): the rate constant k = (k_B T/h) exp(-ΔG‡/RT), where ΔG‡ is the free energy of the transition state (activated complex) relative to reactants. This",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-kramers-turnover-solvent-friction-controls-enzyme-reaction-rates"
      ],
      "communication_gap": "Physical chemists computing PES and biochemists studying enzyme kinetics rarely share mathematical frameworks. Transition state theory is taught in physical chemistry but Kramers theory is taught in statistical physics; the equivalence is rarely emphasised. Biophysicists studying protein folding often reinvent Kramers theory terminology.\n",
      "translation_table": [
        {
          "field_a_term": "transition state (saddle point on PES)",
          "field_b_term": "free energy barrier ΔG‡",
          "note": "Same object — the highest-free-energy point on the minimum energy path"
        },
        {
          "field_a_term": "Eyring rate k = (kT/h)exp(-ΔG‡/RT)",
          "field_b_term": "Kramers rate k = (ω_min ω_b / 2πγ)exp(-ΔE/kT)",
          "note": "Both are Arrhenius forms; Kramers adds friction and barrier curvature corrections"
        },
        {
          "field_a_term": "reaction coordinate",
          "field_b_term": "order parameter / progress variable",
          "note": "The collective coordinate along which the system moves from reactant to product basin"
        },
        {
          "field_a_term": "PES landscape (quantum chemistry, DFT)",
          "field_b_term": "free energy landscape (statistical mechanics, umbrella sampling)",
          "note": "Quantum PES averaged over thermal fluctuations gives the free energy surface"
        }
      ],
      "references": [
        {
          "note": "Eyring (1935) — transition state theory, activated complex",
          "doi": "10.1063/1.1749604"
        },
        {
          "note": "Evans & Polanyi (1935) — transition state theory in chemical kinetics",
          "doi": "10.1039/tf9353100875"
        },
        {
          "note": "Kramers (1940) — Brownian motion and escape over barriers",
          "doi": "10.1016/S0031-8914(40)90098-2"
        },
        {
          "note": "Hänggi, Talkner & Borkovec (1990) — reaction rate theory review",
          "doi": "10.1103/RevModPhys.62.251"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-chemistry/b-transition-state-theory-kinetics.yaml"
    },
    {
      "id": "b-transition-state-x-saddle-point",
      "title": "Transition state theory x Saddle point optimization — reaction rate as barrier crossing\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The chemical reaction rate in transition state theory is determined by the flux through the saddle point of the potential energy surface (the transition state); this is mathematically equivalent to finding the minimum energy path on a high-dimensional landscape via saddle point optimization — connec",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Physical chemists developing transition state theory and applied mathematicians developing saddle point optimization work with the same energy landscape geometry; the connection to machine learning loss landscapes (where saddle points govern training dynamics) was drawn explicitly only in 2014-2020, opening a new research direction.\n",
      "translation_table": [
        {
          "field_a_term": "transition state / saddle point (chemistry)",
          "field_b_term": "saddle point in optimization landscape (mathematics)",
          "note": "The transition state is the lowest-energy saddle point on the potential energy surface between reactant and product minima"
        },
        {
          "field_a_term": "activation energy Ea (chemistry)",
          "field_b_term": "barrier height at saddle point (optimization)",
          "note": "The Arrhenius activation energy is the energy difference between the saddle point and the reactant minimum"
        },
        {
          "field_a_term": "minimum energy path / reaction coordinate (chemistry)",
          "field_b_term": "gradient flow path through saddle point (mathematics)",
          "note": "The IRC (intrinsic reaction coordinate) follows steepest descent from saddle point to products/reactants, analogous to gradient flow in optimization"
        },
        {
          "field_a_term": "rate constant k = A * exp(-Ea/RT) (Eyring theory)",
          "field_b_term": "escape rate from metastable basin (Kramers theory)",
          "note": "Kramers' escape rate theory is the stochastic generalization of transition state theory, connecting chemistry to Langevin dynamics"
        }
      ],
      "references": [
        {
          "doi": "10.1063/1.1749604",
          "note": "Eyring (1935) - The activated complex in chemical reactions; J Chem Phys 3:107"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-chemistry/b-transition-state-x-saddle-point.yaml"
    },
    {
      "id": "b-van-der-waals-phase-transitions",
      "title": "The van der Waals equation is the prototype for all mean-field theories of phase transitions — its mathematical structure recurs across Landau theory",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The van der Waals equation (p + a/V²)(V-b) = RT contains the essential mathematical structure of all mean-field phase transitions: a cubic equation of state, a double-well free energy below T_c, and an unphysical loop resolved by Maxwell construction. Near the critical point, it predicts mean-field ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-van-der-waals-free-energy-double-well"
      ],
      "communication_gap": "Chemistry students learn the van der Waals equation as a practical equation of state correction; physics students learn Landau theory as abstract symmetry breaking. The explicit connection is made in graduate statistical mechanics courses but not in standard physical chemistry or introductory physics curricula.\n",
      "translation_table": [
        {
          "field_a_term": "molecular attraction parameter a",
          "field_b_term": "negative quadratic term in Landau free energy (-a|phi|^2)",
          "note": "Both provide the driving force for phase separation below T_c"
        },
        {
          "field_a_term": "excluded volume b",
          "field_b_term": "positive quartic term in Landau free energy (+b|phi|^4)",
          "note": "Both stabilize the ordered phase and prevent divergence"
        },
        {
          "field_a_term": "Maxwell construction (horizontal lever rule)",
          "field_b_term": "double-tangent construction for phase coexistence",
          "note": "Equal-area rule = common tangent = minimization of Gibbs free energy"
        },
        {
          "field_a_term": "critical exponent beta = 1/2 (mean field)",
          "field_b_term": "mean-field critical exponent for any scalar order parameter",
          "note": "Universal for mean-field theory; corrected by fluctuations (Wilson RG)"
        }
      ],
      "references": [
        {
          "note": "van der Waals, J.D. (1873). Over de continuïteit van den gas- en vloeistoftoestand. PhD thesis, Leiden."
        },
        {
          "doi": "10.1038/011357a0",
          "note": "Maxwell (1875). On the dynamical evidence of the molecular constitution of bodies. Nature 11:357."
        },
        {
          "note": "Landau, L.D. (1937). On the theory of phase transitions. Phys Z Sowjetunion 11:26."
        },
        {
          "note": "Stanley, H.E. (1971). Introduction to Phase Transitions and Critical Phenomena. Oxford University Press."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-chemistry/b-van-der-waals-phase-transitions.yaml"
    },
    {
      "id": "b-xray-crystallography-structure",
      "title": "Bragg's law nλ = 2d sinθ bridges X-ray physics (diffraction from crystal planes) to chemical structure determination (electron density maps via Fourier inversion), with the phase problem as the central mathematical obstacle whose solutions (isomorphous replacement, anomalous diffraction, molecular replacement) enabled the determination of insulin, vitamin B12, and DNA double helix structures.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Bragg's law nλ = 2d sinθ (1913) established that X-rays constructively interfere when the path length difference 2d sinθ equals an integer multiple of the wavelength — a purely physical result about wave diffraction from periodic planes with spacing d. The structure factor F_{hkl} = Σⱼ fⱼ exp(2πi(hx",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-cryo-em-supersedes-xray-membrane-proteins"
      ],
      "communication_gap": "Physicists who study X-ray diffraction from the wave optics perspective rarely interact with structural biologists who use it to determine protein structures. The mathematical infrastructure (reciprocal space, Fourier methods, Bayesian refinement) is shared but has developed independently in each community, with parallel but disconnected literatures in Acta Crystallographica (chemistry/biology) and Physical Review (physics). The Nobel history spans Physics (Laue, Bragg) and Chemistry (Hodgkin) committees — reflecting the genuine disciplinary boundary.\n",
      "translation_table": [
        {
          "field_a_term": "Bragg diffraction condition nλ = 2d sinθ",
          "field_b_term": "reciprocal lattice vector (hkl) indexing crystal planes",
          "note": "Physics gives the condition; crystallography converts angles to (hkl) Miller indices"
        },
        {
          "field_a_term": "structure factor F_{hkl} (Fourier coefficient)",
          "field_b_term": "electron density Fourier component at reciprocal lattice point",
          "note": "F is measured (amplitude) but phase is lost — the central challenge of crystallography"
        },
        {
          "field_a_term": "Fourier transform (continuous)",
          "field_b_term": "crystallographic Fourier synthesis (electron density map)",
          "note": "The discrete Fourier sum over Miller indices gives the continuous electron density"
        },
        {
          "field_a_term": "anomalous dispersion (near absorption edge)",
          "field_b_term": "SAD/MAD phasing method",
          "note": "Wavelength-dependent f'' component provides phase information without isomorphous replacement"
        },
        {
          "field_a_term": "Patterson function P(u,v,w) = |F|² transform",
          "field_b_term": "vector map of interatomic distances (heavy atom location)",
          "note": "Patterson map locates heavy atom positions without phases — first step of MIR/SIR"
        },
        {
          "field_a_term": "R-factor = Σ|F_obs - F_calc|/ΣF_obs",
          "field_b_term": "crystallographic refinement convergence metric",
          "note": "R_free (cross-validated) distinguishes model improvement from overfitting"
        }
      ],
      "references": [
        {
          "note": "Bragg (1913) — The Diffraction of Short Electromagnetic Waves by a Crystal",
          "doi": "10.1098/rspa.1913.0084"
        },
        {
          "note": "Blow & Crick (1959) — The treatment of errors in the isomorphous replacement method",
          "doi": "10.1107/S0365110X59000299"
        },
        {
          "note": "Taylor (2003) — The phase problem",
          "doi": "10.1107/S0907444903009812"
        },
        {
          "note": "Drenth (2007) — Principles of Protein X-ray Crystallography (textbook reference)",
          "url": "https://link.springer.com/book/10.1007/978-0-387-33746-6"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-chemistry/b-xray-crystallography-structure.yaml"
    },
    {
      "id": "b-climate-tipping-percolation",
      "title": "Tipping points in Earth's climate system are mathematically equivalent to percolation phase transitions in disordered networks",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Climate tipping elements (AMOC, permafrost, ice sheets) exhibit saddle-node bifurcations whose mathematical structure is identical to the second-order phase transition in percolation theory on heterogeneous networks. The critical forcing threshold maps to the percolation threshold p_c, and the power",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Climate scientists use dynamical systems theory (bifurcation analysis) while statistical physicists use percolation and field theory. The communities publish in different journals and attend different conferences. The mathematical equivalence is recognised by a small number of complexity scientists but has not been systematically exploited.\n",
      "translation_table": [
        {
          "field_a_term": "climate tipping element",
          "field_b_term": "percolation cluster",
          "note": "Each tipping element is a node; coupling between elements is an edge with weight proportional to teleconnection strength"
        },
        {
          "field_a_term": "critical forcing threshold",
          "field_b_term": "percolation threshold p_c",
          "note": "Both mark the transition from isolated change to system-wide cascade"
        },
        {
          "field_a_term": "tipping cascade",
          "field_b_term": "giant connected component formation",
          "note": "A cascade of tipping elements is analogous to the formation of the giant percolation cluster above p_c"
        },
        {
          "field_a_term": "early warning signals (variance, autocorrelation)",
          "field_b_term": "diverging correlation length near critical point",
          "note": "Critical slowing down near tipping points corresponds to correlation length divergence in percolation"
        }
      ],
      "references": [],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/physics-climate/b-climate-tipping-percolation.yaml"
    },
    {
      "id": "b-tipping-points-phase-transitions",
      "title": "Climate tipping points are formal thermodynamic phase transitions — the Amazon dieback, Arctic sea ice loss, Atlantic circulation collapse, and permafrost carbon release each correspond to a specific bifurcation class (fold, Hopf, transcritical), and condensed-matter physics provides a century of analytical early-warning indicators that climate science has not systematically imported.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "In condensed-matter physics, phase transitions are classified by their bifurcation structure: first-order transitions have hysteresis and latent heat; second-order transitions have diverging correlation length and critical slowing down; fold bifurcations produce irreversible state changes with no re",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-amoc-fold-bifurcation-ew"
      ],
      "communication_gap": "The IPCC Sixth Assessment Report (2021) uses the term \"tipping point\" 171 times and \"bifurcation\" 12 times, but \"phase transition\" appears only in the context of physical state changes (liquid to solid), never in the dynamical-systems or statistical-physics sense. Climate scientists have independently developed EWI theory (Scheffer 2009, Lenton 2011) based on dynamical systems bifurcation theory, but have not connected it to the condensed-matter physics literature that provides: (a) universality classes, (b) exact exponents, (c) spatial EWIs, (d) optimal detection statistics. The two communities publish in Nature Climate Change vs Physical Review — journals with essentially no shared readership.\n",
      "translation_table": [
        {
          "field_a_term": "fold bifurcation (first-order transition, hysteresis)",
          "field_b_term": "Amazon forest/savanna bistability; AMOC on/off states; ice sheet glaciation/deglaciation"
        },
        {
          "field_a_term": "second-order phase transition (critical slowing down, nu exponent)",
          "field_b_term": "Arctic sea ice thinning approaching seasonal ice-free state"
        },
        {
          "field_a_term": "Hopf bifurcation (oscillation onset)",
          "field_b_term": "ENSO amplitude changes; monsoon system bistability"
        },
        {
          "field_a_term": "early-warning indicator — rising lag-1 autocorrelation (AR1)",
          "field_b_term": "slowing recovery from perturbations in climate time series before tipping"
        },
        {
          "field_a_term": "early-warning indicator — rising variance (sigma^2)",
          "field_b_term": "increasing variability in rainfall, sea ice extent, or AMOC fingerprint before collapse"
        },
        {
          "field_a_term": "critical slowing down (eigenvalue approaching zero)",
          "field_b_term": "slowing of climate system's return to equilibrium after volcanic forcing or ENSO events"
        },
        {
          "field_a_term": "spatial correlation length divergence at second-order transition",
          "field_b_term": "growing spatial coherence of vegetation stress or sea-surface temperature anomalies before tipping"
        },
        {
          "field_a_term": "nucleation (first-order transition initiated by fluctuation above barrier)",
          "field_b_term": "local Amazon deforestation patch triggering regional forest collapse above a critical patch size"
        }
      ],
      "references": [
        {
          "doi": "10.1038/461472a",
          "note": "Lenton et al. (2008) — defining climate tipping elements; qualitative bifurcation classification"
        },
        {
          "doi": "10.1073/pnas.0805172105",
          "note": "Scheffer et al. (2009) — early warning signals before critical transitions; AR1 and variance EWIs"
        },
        {
          "doi": "10.1126/science.1225244",
          "note": "Scheffer et al. (2012) — anticipating critical transitions; spatial EWIs and universality"
        },
        {
          "doi": "10.1038/s41586-021-03796-w",
          "note": "Boers (2021) — AMOC approaching tipping point; AR1 and variance rising in fingerprint data"
        },
        {
          "doi": "10.1126/sciadv.abn7950",
          "note": "Boulton et al. (2022) — Amazon resilience declining since 2000; AR1 signature in satellite NDVI"
        },
        {
          "doi": "10.1103/RevModPhys.49.435",
          "note": "Wilson (1979) — Nobel lecture on renormalization group; universality and critical exponents — the physics toolkit climate science has not imported"
        }
      ],
      "last_reviewed": "2026-05-04",
      "file": "cross-domain/physics-climate/b-tipping-points-phase-transitions.yaml"
    },
    {
      "id": "b-entropy-conscious-experience",
      "title": "Integrated information theory (Tononi 2004) quantifies consciousness as Φ — the information generated by a system above and beyond its parts — while Friston's free energy principle connects conscious inference to entropy minimization, together posing the deepest open question about the relationship between physical entropy and phenomenal experience.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Integrated information theory (IIT; Tononi 2004) defines consciousness as Φ, the amount of irreducible integrated information: the effective information generated by the whole system above and beyond the sum of its parts. Φ is computed from the cause-effect structure of a system and represents how m",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-integrated-information-entropy-equivalence"
      ],
      "communication_gap": "IIT is debated primarily among neuroscientists, philosophers of mind, and computational neuroscientists in specialist journals and workshops. Thermodynamicists and statistical physicists rarely engage with the consciousness literature. Friston's free energy principle is mathematically sophisticated (variational Bayes, Laplace approximations) and primarily read by theoretical neuroscientists who rarely connect it to thermodynamics. The hard problem of consciousness (why any physical process gives rise to subjective experience) means the empirical stakes of these formalisms are philosophically contested.\n",
      "translation_table": [
        {
          "field_a_term": "thermodynamic entropy S = k_B ln Ω",
          "field_b_term": "phenomenal richness (number of distinguishable conscious states)",
          "note": "IIT posits that high Φ systems can have many distinguishable experiences — analogous to high entropy"
        },
        {
          "field_a_term": "integrated information Φ",
          "field_b_term": "degree of consciousness (scalar measure of experience)",
          "note": "Φ > 0 is claimed necessary and sufficient for any degree of consciousness"
        },
        {
          "field_a_term": "variational free energy F = KL(q||p) - log P(y)",
          "field_b_term": "the brain's prediction error (surprisal) about sensory inputs",
          "note": "Friston — brains minimize F; this is equivalent to approximate Bayesian inference"
        },
        {
          "field_a_term": "partition function Z (sum over states)",
          "field_b_term": "Bayesian model evidence (marginal likelihood) in brain inference"
        },
        {
          "field_a_term": "irreversibility (entropy production)",
          "field_b_term": "the directionality and temporal binding of conscious experience"
        },
        {
          "field_a_term": "Maxwell's demon (information and thermodynamic work)",
          "field_b_term": "selective attention as thermodynamic work done on sensory information"
        }
      ],
      "references": [
        {
          "doi": "10.1186/1471-2202-5-42",
          "note": "Tononi (2004) — An information integration theory of consciousness; BMC Neurosci 5:42"
        },
        {
          "doi": "10.1038/nrn.2016.44",
          "note": "Tononi et al. (2016) — Integrated information theory; from consciousness to its physical substrate; Nat Rev Neurosci 17:450"
        },
        {
          "doi": "10.1038/nrn2787",
          "note": "Friston (2010) — The free-energy principle; a unified brain theory?; Nat Rev Neurosci 11:127"
        },
        {
          "note": "Chalmers (1996) — The Conscious Mind; Oxford University Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-cognitive-science/b-entropy-conscious-experience.yaml"
    },
    {
      "id": "b-self-organized-criticality",
      "title": "Self-organized criticality (SOC) ↔ power-law distributions in brains, earthquakes, forest fires, and extinctions",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Bak, Tang & Wiesenfeld (1987) showed that a sandpile model — where grains are added one at a time and avalanches redistribute them — spontaneously evolves to a critical state without any tuning of parameters. At criticality, avalanche sizes follow a power law P(s) ~ s^{-3/2} (the BTW exponent). This",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-criticality-conscious-integration",
        "h-grokking-criticality-universality"
      ],
      "communication_gap": "Bak et al. published in Physical Review Letters (1987) for physicists. Beggs & Plenz published in Journal of Neuroscience (2003) for neuroscientists. Gutenberg & Richter published in Bulletin of the Seismological Society (1944) for geophysicists. Each community developed the power-law analysis independently. The unification under SOC was proposed by Bak's 1996 book \"How Nature Works\" but is still not standard curriculum in neuroscience or ecology programs.\n",
      "translation_table": [
        {
          "field_a_term": "grain addition (sandpile)",
          "field_b_term": "synaptic input / tectonic stress / biomass accumulation",
          "note": "Slow external driving that builds up stored energy/state"
        },
        {
          "field_a_term": "avalanche (toppling)",
          "field_b_term": "neural burst / earthquake / fire / extinction cascade",
          "note": "Rapid redistribution event; size follows power law at criticality"
        },
        {
          "field_a_term": "BTW exponent -3/2",
          "field_b_term": "neuronal avalanche exponent -3/2 (Beggs & Plenz 2003)",
          "note": "Same exponent — not approximately, but within measurement error"
        },
        {
          "field_a_term": "critical slowing down near p_c",
          "field_b_term": "long-range temporal correlations in brain / seismicity",
          "note": "1/f noise spectrum is the hallmark of SOC"
        },
        {
          "field_a_term": "dissipation at boundary",
          "field_b_term": "homeostatic plasticity / tectonic plate motion / fire suppression",
          "note": "What sets the stationary state; changes the universality class"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.59.381",
          "note": "Bak, Tang & Wiesenfeld (1987) PRL — original SOC paper"
        },
        {
          "doi": "10.1523/JNEUROSCI.23-35-11167.2003",
          "note": "Beggs & Plenz (2003) J.Neurosci. — neuronal avalanches with -3/2 exponent"
        },
        {
          "doi": "10.1038/359679a0",
          "note": "Gutenberg & Richter (1944/1992) — earthquake magnitude-frequency law"
        },
        {
          "doi": "10.1016/0375-9601(96)00036-8",
          "note": "Sole & Manrubia (1996) Phys Lett A — extinction event SOC"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/physics-complexity/b-self-organized-criticality.yaml"
    },
    {
      "id": "b-ising-model-x-hopfield-network",
      "title": "Ising model x Hopfield network — spin glass as associative memory\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The Hopfield neural network for associative memory is exactly the Ising spin glass model; stored memories correspond to local energy minima, retrieval is energy minimization, and the network's memory capacity is set by the spin-glass phase boundary.\n",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Hopfield published independently in 1982, unaware that his model was the spin glass Hamiltonian already studied by Edwards and Anderson (1975). The connection was recognized by Amit, Gutfreund & Sompolinsky (1985) but remains a pedagogical gap.\n",
      "translation_table": [
        {
          "field_a_term": "Ising spin configuration",
          "field_b_term": "Neural activity pattern",
          "note": "Each spin (+1/-1) corresponds to a neuron's firing state (active/silent); the spin glass Hamiltonian is identical to the Hopfield energy function.\n"
        },
        {
          "field_a_term": "Spin glass frozen state (local energy minimum)",
          "field_b_term": "Stored memory attractor",
          "note": "Both are metastable states of the same energy landscape; spin glass frustration corresponds to memory interference in the Hopfield network.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.79.8.2554",
          "note": "Hopfield (1982) — neural networks and physical systems with emergent collective computational abilities"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-computer-science/b-ising-model-x-hopfield-network.yaml"
    },
    {
      "id": "b-quantum-annealing-optimization",
      "title": "Quantum annealing exploits quantum tunneling to escape optimisation local minima, mapping NP-hard combinatorial problems onto Ising Hamiltonians solved by adiabatic quantum evolution.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Quantum annealing (Kadowaki & Nishimori 1998) uses quantum tunneling through energy barriers rather than thermal fluctuations (classical simulated annealing) to find global minima of cost functions. The tunneling amplitude Γ plays the formal role of temperature T: as Γ → 0, the system localises in t",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-quantum-annealing-qaoa-comparison"
      ],
      "communication_gap": "Quantum physics and computer science have different communities. Theoretical computer scientists focus on worst-case complexity; physicists focus on typical-case behaviour. The question of quantum speedup requires language from both communities (complexity classes from CS, spectral gap theory from physics), which is rarely taught across the disciplinary boundary.\n",
      "translation_table": [
        {
          "field_a_term": "tunneling amplitude Γ",
          "field_b_term": "temperature T in simulated annealing",
          "note": "Both drive escape from local minima; quantum tunneling is exponentially faster through thin barriers"
        },
        {
          "field_a_term": "Ising Hamiltonian ground state",
          "field_b_term": "optimal solution of combinatorial problem",
          "note": "Problem variables map to spin-1/2 degrees of freedom; interaction strengths encode cost function"
        },
        {
          "field_a_term": "quantum phase transition at minimum spectral gap",
          "field_b_term": "algorithmic hardness bottleneck",
          "note": "First-order QPT (avoided level crossing) creates exponentially small gap; annealing fails"
        },
        {
          "field_a_term": "adiabatic theorem condition",
          "field_b_term": "time complexity requirement",
          "note": "Annealing time must scale as 1/Δ_min²; if gap exponentially small, QA provides no speedup"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevE.58.5355",
          "note": "Kadowaki, T. & Nishimori, H. (1998). Quantum annealing in the transverse Ising model. Phys Rev E 58:5355."
        },
        {
          "arxiv": "quant-ph/0001106",
          "note": "Farhi, E. et al. (2000). Quantum computation by adiabatic evolution."
        },
        {
          "doi": "10.1038/nature10012",
          "note": "Johnson, M.W. et al. (2011). Quantum annealing with manufactured spins. Nature 473:194."
        },
        {
          "doi": "10.3389/fphy.2014.00005",
          "note": "Lucas, A. (2014). Ising formulations of many NP problems. Front Phys 2:5."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-computer-science/b-quantum-annealing-optimization.yaml"
    },
    {
      "id": "b-quantum-zeno-x-watchdog-sampling-analogy",
      "title": "Frequent projective measurement in the quantum Zeno effect freezes coherent evolution by collapsing survival probability toward unity when interrogations occur faster than the intrinsic transition rate — a discrete-time template analogous (only analogically) to microcontroller watchdog timers and control-loop sampling that repeatedly reset or observe state to prevent runaway dynamics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Quantum survival amplitude after N measurements scales roughly as (1 − ΓΔt)^N for short intervals Δt, motivating exponential-in-(measurement rate) suppression resembling heuristic reliability gains when watchdog kicks arrive faster than fault accumulation timescales — **no claim that firmware execut",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-student-transfer-zeno-curve-to-sampling-stability-drills"
      ],
      "communication_gap": "Quantum optics textbooks derive Zeno scalings with projection operators while embedded-systems courses discuss watchdogs without dynamical-system hazard rates — practitioners rarely share notation even when designing ion-trap experiments versus flight software.\n",
      "translation_table": [
        {
          "field_a_term": "measurement interval Δt versus coupling strength / transition rate Γ",
          "field_b_term": "watchdog period versus estimated worst-case fault dwell time",
          "note": "Both tune cadence relative to an intrinsic rate — purely heuristic correspondence."
        },
        {
          "field_a_term": "survival probability under repeated projections (Zeno suppression)",
          "field_b_term": "probability of retaining safe controller state when resets preempt faults",
          "note": "Formal mathematics differs (quantum probability vs reliability engineering)."
        },
        {
          "field_a_term": "quantum anti-Zeno acceleration regime at intermediate rates",
          "field_b_term": "sampling too slowly can miss faults; oversampling may waste CPU — analogous trade curve only at qualitative level",
          "note": "Anti-Zeno physics is subtle; embedded scheduling tradeoffs are deterministic plus stochastic failure models — analogy must stay qualitative."
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.74.1259",
          "note": "Itano et al. (1995) — experimental quantum Zeno effect"
        },
        {
          "doi": "10.1103/PhysRevA.41.2295",
          "note": "Facchi & Pascazio (1990) — quantum Zeno dynamics framework"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-computer-science/b-quantum-zeno-x-watchdog-sampling-analogy.yaml"
    },
    {
      "id": "b-renormalization-group-flow-x-deep-network-layer-coarse-graining",
      "title": "Renormalization group narratives bridge coarse-graining in theoretical physics with informal analogies between depth and progressive feature abstraction in deep neural networks.",
      "source_domain": "",
      "target_domain": "",
      "status": "contested",
      "bridge_claim": "Pedagogical bridge (widely discussed, contested as literal identification): layerwise feature transformations resemble iterative coarse-graining because both discard microscopic degrees of freedom while preserving selected relevant operators; explicit speculative analogy—training schedules that stab",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-beta-scheduled-layer-wise-training-mimics-rg-stability"
      ],
      "communication_gap": "Physics RG carries asymptotic rigor in restricted settings while ML depth phenomena are architecture- and data-dependent without a single flow equation.",
      "translation_table": [
        {
          "field_a_term": "relevant operators",
          "field_b_term": "stable features / transfer-ready representations",
          "note": "Informal correspondence without guaranteed universality classes."
        },
        {
          "field_a_term": "irrelevant operators",
          "field_b_term": "nuisance variability damped by depth",
          "note": "Depth can also amplify spurious correlations—breaks naive RG metaphor."
        },
        {
          "field_a_term": "fixed-point analysis",
          "field_b_term": "scaling-law / plateau behaviors in training",
          "note": "Phenomenology overlaps without shared equations of motion."
        }
      ],
      "references": [
        {
          "arxiv": "1410.3831",
          "note": "Widely cited ML–physics discussion probing depth-related phenomena with RG-flavored language (interpret as analogy literature, not universal proof)."
        }
      ],
      "last_reviewed": "2026-05-09",
      "file": "cross-domain/physics-computer-science/b-renormalization-group-flow-x-deep-network-layer-coarse-graining.yaml"
    },
    {
      "id": "b-restricted-boltzmann-machine-x-ising-energy-based-models",
      "title": "Restricted Boltzmann machines explicitly instantiate energy-based graphical models whose equilibrium statistics resemble Ising-like Boltzmann distributions used in statistical physics pedagogy.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Established modeling correspondence: RBMs define bipartite energy functions whose Gibbs distribution parallels Boltzmann weights on interacting latent-visible spins up to representation choices; speculative analogy for training dynamics—optimization trajectories need not mirror physical equilibratio",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-annealed-rbm-pretraining-improves-phase-diagram-discovery"
      ],
      "communication_gap": "Physics emphasizes phase transitions and mixed phases while ML venues emphasize downstream task performance without thermodynamic interpretation consistency.",
      "translation_table": [
        {
          "field_a_term": "energy function E(v,h)",
          "field_b_term": "Hamiltonian / Ising energy on effective spins",
          "note": "Constants absorbed into temperature conventions differ by convention."
        },
        {
          "field_a_term": "partition function",
          "field_b_term": "normalizing constant / free-energy landscape",
          "note": "Intractability mirrors physics models."
        },
        {
          "field_a_term": "contrastive divergence updates",
          "field_b_term": "short-time relaxation approximations",
          "note": "Algorithmic approximation introduces ML-specific bias."
        }
      ],
      "references": [
        {
          "url": "https://www.cs.toronto.edu/~hinton/absps/guideTR.pdf",
          "note": "Practical guide to training RBMs (canonical ML-side reference document)."
        }
      ],
      "last_reviewed": "2026-05-09",
      "file": "cross-domain/physics-computer-science/b-restricted-boltzmann-machine-x-ising-energy-based-models.yaml"
    },
    {
      "id": "b-spin-glass-replica-optimization",
      "title": "The replica method from spin-glass theory exactly characterizes the typical-case complexity of random constraint satisfaction problems, revealing phase transitions from easy to hard to unsatisfiable regimes that govern practical algorithm performance",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The free energy of an Ising spin glass with random couplings, computed via the replica trick and replica-symmetry breaking (RSB) ansatz, maps exactly onto the satisfiability threshold of random k-SAT at clause-to-variable ratio alpha_c, predicting the hard-easy phase transition that explains why sea",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Complexity theorists study worst-case hardness while statistical physicists analyze typical-case phase transitions; the replica/cavity method bridges typical-case analysis (physics) to algorithm design (CS), but the mathematical formalism is non-rigorous and rarely taught in CS programs.",
      "translation_table": [
        {
          "field_a_term": "spin glass free energy F = -kT ln Z",
          "field_b_term": "log-partition function of random k-SAT formula",
          "note": "Both are quenched averages over random disorder; the replica trick averages ln Z = lim_{n->0} (Z^n - 1)/n"
        },
        {
          "field_a_term": "replica-symmetry breaking (RSB)",
          "field_b_term": "exponentially many metastable solution clusters in k-SAT",
          "note": "RSB signals that solutions cluster into exponentially many well-separated groups; search algorithms get trapped between clusters"
        },
        {
          "field_a_term": "Parisi order parameter q(x)",
          "field_b_term": "overlap distribution P(q) between random solutions",
          "note": "P(q) = delta(q) (RS phase, easy) or broad distribution (RSB phase, hard)"
        },
        {
          "field_a_term": "SAT/UNSAT phase transition alpha_c",
          "field_b_term": "thermodynamic phase transition in spin glass free energy",
          "note": "alpha_c for 3-SAT ~ 4.267; exactly predicted by cavity method (belief propagation at zero temperature)"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1073287",
          "note": "Mezard et al. (2002) Science - replica and cavity method predict 3-SAT threshold"
        },
        {
          "doi": "10.1073/pnas.90.22.10844",
          "note": "Kirkpatrick & Selman (1994) Science - phase transition in random 3-SAT"
        },
        {
          "doi": "10.1007/978-3-540-24605-3_37",
          "note": "Mezard & Montanari (2009) - Information, Physics, Computation: RSB and belief propagation"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-computer-science/b-spin-glass-replica-optimization.yaml"
    },
    {
      "id": "b-quantum-error-correction-topology",
      "title": "Topological quantum error-correcting codes (Kitaev's toric code) are physically realized as Z2 lattice gauge theories whose ground states are topological phases of matter — bridging quantum information theory, condensed-matter physics, and high-energy gauge theory via the shared language of anyons, topological order, and ground-state degeneracy on non-trivial manifolds.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Kitaev's toric code (2003) is simultaneously: (A) A quantum error-correcting code with macroscopic code distance, where logical\n    qubits are encoded in global topological degrees of freedom immune to local\n    errors;\n(B) A Z2 lattice gauge theory (the simplest deconfined gauge theory in 2+1D),\n  ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-topological-phase-qec-threshold-correspondence"
      ],
      "communication_gap": "Quantum error correction research lives primarily in quantum information/computing conferences (QIP, IEEE QW). Topological condensed matter research lives in physical review journals (PRB, PRL, Nature Physics). Gauge theory lives in high-energy theory (JHEP, PRD). Despite the mathematical identity of these three perspectives, cross-citation is limited. Experimentalists building topological qubits (Google, Microsoft, IBM) rarely cite the condensed-matter topological order literature from the same year, and vice versa.\n",
      "translation_table": [
        {
          "field_a_term": "logical qubit (encoded in toric code)",
          "field_b_term": "ground state degeneracy of topological phase (depends on genus of manifold)"
        },
        {
          "field_a_term": "code distance (errors required to cause logical error)",
          "field_b_term": "topological protection (local perturbations cannot change topological invariant)"
        },
        {
          "field_a_term": "stabilizer generators (plaquette and vertex operators)",
          "field_b_term": "Z2 gauge-invariant operators (magnetic and electric flux)"
        },
        {
          "field_a_term": "anyon e (code error: flipped vertex stabilizer)",
          "field_b_term": "gauge charge (electric charge in Z2 gauge theory)"
        },
        {
          "field_a_term": "anyon m (code error: flipped plaquette stabilizer)",
          "field_b_term": "gauge vortex (magnetic flux in Z2 gauge theory)"
        },
        {
          "field_a_term": "error correction (moving anyons back to boundary and annihilating)",
          "field_b_term": "confinement transition (anyons become confined at phase boundary)"
        },
        {
          "field_a_term": "threshold error rate (below which QEC succeeds asymptotically)",
          "field_b_term": "phase boundary of Z2 gauge theory (deconfined/topological vs. trivial)"
        }
      ],
      "references": [
        {
          "arxiv": "quant-ph/0303004",
          "note": "Kitaev (2003) - fault-tolerant quantum computation by anyons; original toric code paper"
        },
        {
          "doi": "10.1103/PhysRevLett.96.110405",
          "note": "Levin & Wen (2006) - topological entanglement entropy"
        },
        {
          "doi": "10.1103/PhysRevLett.96.110404",
          "note": "Kitaev & Preskill (2006) - topological entanglement entropy"
        },
        {
          "arxiv": "1207.6823",
          "note": "Haah (2011) - cubic code; fracton topological order"
        },
        {
          "doi": "10.1038/s41586-023-06927-2",
          "note": "Google Quantum AI (2023) - suppressing quantum errors by scaling surface code logical qubits"
        },
        {
          "arxiv": "1411.7041",
          "note": "Almheiri, Dong, Harlow (2015) - bulk locality and quantum error correction in AdS/CFT"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/physics-computing/b-quantum-error-correction-topology.yaml"
    },
    {
      "id": "b-spin-glass-neural-networks",
      "title": "Spin-glass statistical mechanics ↔ associative memory capacity and phase transitions in neural networks",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Hopfield (1982) model of associative memory is mathematically identical to the Sherrington-Kirkpatrick spin glass: neuron states map to spins, synaptic weights to random exchange couplings, and stored memories to planted low-energy states.  The memory capacity alpha_c = p/N = 0.138 (patterns per",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-hopfield-alzheimers-glass-transition"
      ],
      "communication_gap": "Hopfield acknowledged the connection to spin glasses in his 1982 paper, but the exact capacity calculation required the full machinery of replica symmetry breaking developed by Parisi for spin glasses — a result not published until 1979, and not fully understood until the mid-1980s.  The ML community largely rediscovered this independently through the deep learning loss-landscape literature of 2014-2016, without initially connecting it to the Parisi replica solution.\n",
      "translation_table": [
        {
          "field_a_term": "spin sigma_i in {-1, +1}",
          "field_b_term": "neuron state s_i (firing / silent)",
          "note": "Binary units in both models"
        },
        {
          "field_a_term": "random exchange coupling J_ij",
          "field_b_term": "Hebbian synaptic weight W_ij = (1/N) * sum_mu xi_i^mu * xi_j^mu",
          "note": "Quenched disorder from stored patterns"
        },
        {
          "field_a_term": "spin-glass transition temperature T_g",
          "field_b_term": "capacity threshold alpha_c = 0.138",
          "note": "Phase boundary between ordered (retrieval) and glassy (confused) phases"
        },
        {
          "field_a_term": "energy E = -1/2 sum_ij J_ij s_i s_j",
          "field_b_term": "Hopfield energy E = -1/2 sum_ij W_ij s_i s_j",
          "note": "Identical functional form"
        },
        {
          "field_a_term": "metastable spin-glass state",
          "field_b_term": "spurious memory (confabulation)",
          "note": "Wrong-but-stable attractors below the glass transition"
        },
        {
          "field_a_term": "replica symmetry breaking (Parisi 1979)",
          "field_b_term": "hierarchical memory organisation",
          "note": "RSB predicts ultrametric structure of memory space"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.79.8.2554",
          "note": "Hopfield (1982) — original associative memory model"
        },
        {
          "doi": "10.1103/PhysRevLett.55.1530",
          "note": "Amit, Gutfreund & Sompolinsky (1985) — exact capacity from replica method"
        },
        {
          "doi": "10.1103/PhysRevLett.43.1754",
          "note": "Parisi (1979) — replica symmetry breaking solution"
        },
        {
          "arxiv": "1412.0233",
          "note": "Choromanska et al. (2015) — deep network loss landscape as spin glass"
        },
        {
          "arxiv": "2008.02217",
          "note": "Ramsauer et al. (2020) — modern Hopfield networks, capacity ~ N^2"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/physics-computing/b-spin-glass-neural-networks.yaml"
    },
    {
      "id": "b-boltzmann-machine-x-ising-model",
      "title": "Boltzmann machine x Ising model — energy-based learning as statistical mechanics\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "A Boltzmann machine is a stochastic neural network whose equilibrium distribution is the Boltzmann distribution of an Ising-type Hamiltonian; training by contrastive divergence minimizes the KL divergence between data distribution and model Boltzmann distribution — learning as statistical mechanics,",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Statistical physicists studying Ising models and machine learning researchers developing energy-based models developed parallel frameworks; the Hopfield network (1982) made the connection explicit but the practical training algorithm (contrastive divergence) was developed in machine learning without full cross-pollination of the statistical mechanics renormalization group perspective on learning.\n",
      "translation_table": [
        {
          "field_a_term": "synaptic weight W_ij (neural network)",
          "field_b_term": "spin-spin coupling J_ij in Ising Hamiltonian (physics)",
          "note": "The weight matrix in a Boltzmann machine is exactly the coupling matrix of an Ising/Hopfield model"
        },
        {
          "field_a_term": "neuron activation (0/1 or +/-1) (neural network)",
          "field_b_term": "Ising spin s_i = +/-1 (physics)",
          "note": "Stochastic binary neurons are Ising spins; the sigmoid activation function is the Fermi-Dirac distribution"
        },
        {
          "field_a_term": "contrastive divergence training (machine learning)",
          "field_b_term": "thermodynamic integration between two Boltzmann distributions (stat mech)",
          "note": "CD minimizes the difference between free energies of data and model distributions — a statistical mechanics quantity"
        },
        {
          "field_a_term": "restricted Boltzmann machine visible/hidden layers (ML)",
          "field_b_term": "bipartite Ising model with no intra-layer coupling (physics)",
          "note": "The RBM's bipartite structure eliminates frustration, enabling exact inference and tractable partition function computation"
        }
      ],
      "references": [
        {
          "doi": "10.1016/S0364-0213(85)80012-4",
          "note": "Ackley, Hinton & Sejnowski (1985) - A learning algorithm for Boltzmann machines; Cognitive Science 9:147"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-cs/b-boltzmann-machine-x-ising-model.yaml"
    },
    {
      "id": "b-cavity-method-x-belief-propagation",
      "title": "Cavity method ↔ Belief propagation — Bethe-Peierls approximation as message passing",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The cavity method of spin glass theory (Mézard & Parisi) and the belief propagation algorithm in graphical models are identical mathematical objects; the Bethe free energy approximation corresponds to loopy BP on factor graphs, and replica symmetry breaking corresponds to the existence of multiple f",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-cavity-method-x-belief-propagation"
      ],
      "communication_gap": "Spin glass theory (Edwards-Anderson 1975, Parisi 1980) was developed in condensed matter physics; belief propagation (Pearl 1982, 1988) was developed in artificial intelligence for Bayesian networks. Both communities worked on the same mathematical objects (factor graphs, message passing on graphs) for two decades without significant cross-citation. The explicit identification was made by Mézard & Montanari (2009) in the book 'Information, Physics, and Computation', which bridged statistical physics and computer science.",
      "translation_table": [
        {
          "field_a_term": "cavity field h_{i→j} (field at site i excluding site j's influence)",
          "field_b_term": "message μ_{i→j}(xᵢ) in belief propagation (marginal belief sent from i to j)",
          "note": "Cavity field is the log-ratio of BP messages for binary variables; exactly equivalent"
        },
        {
          "field_a_term": "Bethe free energy F_Bethe (approximation via local factor graph structure)",
          "field_b_term": "free energy approximation used in loopy BP (sum-product algorithm)",
          "note": "BP fixed points are exactly the stationary points of the Bethe free energy"
        },
        {
          "field_a_term": "replica symmetry (RS) solution — unique Gibbs measure",
          "field_b_term": "unique fixed point of belief propagation messages",
          "note": "RS ↔ unique BP fixed point; RSB ↔ multiple BP fixed points (frustration, glassy phase)"
        },
        {
          "field_a_term": "survey propagation (SP) for random SAT — generalised BP with surveys",
          "field_b_term": "warning propagation on factor graph near SAT-UNSAT threshold",
          "note": "SP exploits RSB structure to solve random 3-SAT near the threshold more efficiently than BP"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1073287",
          "note": "Mézard & Montanari (2002) — analytic and algorithmic solution of random satisfiability problems; Science 297:812"
        },
        {
          "doi": "10.1002/j.1538-7305.1988.tb00880.x",
          "note": "Pearl (1988) — probabilistic reasoning in intelligent systems; Morgan Kaufmann"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-cs/b-cavity-method-x-belief-propagation.yaml"
    },
    {
      "id": "b-diffusion-models-x-stochastic-processes",
      "title": "Diffusion Generative Models x Stochastic Differential Equations - score matching as time-reversed diffusion\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Diffusion generative models (DALL-E, Stable Diffusion) learn to reverse a stochastic diffusion process (data to noise) by estimating the score function nabla_x log p(x); the generative SDE is the time-reversal of the forward Ito diffusion, grounded in Anderson's time-reversal theorem - making modern",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Stochastic differential equations (Ito, 1944) and time-reversal theorems (Anderson, 1982) existed decades before diffusion models; the explicit connection was made by Song et al. (2021) and Ho et al. (2020), enabling continuous-time generalization and principled design of noise schedules using SDE theory.\n",
      "translation_table": [
        {
          "field_a_term": "Forward noising process (data -> Gaussian noise over time T)",
          "field_b_term": "Forward Ito SDE: dX = f(X,t)dt + g(t)dW",
          "note": "The forward diffusion gradually corrupts data by adding Gaussian noise; this is an Ito stochastic differential equation with drift f (mean-reverting) and diffusion coefficient g(t); at time T, the distribution approaches pure noise.\n"
        },
        {
          "field_a_term": "Reverse denoising process (noise -> data sample)",
          "field_b_term": "Time-reversed SDE (Anderson 1982)",
          "note": "Anderson's theorem states the time-reversal of an Ito diffusion is also an Ito SDE: dX = [f - g^2 * nabla log p_t(X)] dt + g dW_bar; the score function nabla log p_t is the only unknown and is learned by the neural network.\n"
        },
        {
          "field_a_term": "Score network (denoising neural network)",
          "field_b_term": "Score function estimator s_theta(x,t) ~ nabla log p_t(x)",
          "note": "The neural network is trained to estimate the score (gradient of log density) at each noise level; this is equivalent to learning to denoise corrupted data by minimizing weighted denoising score matching objectives.\n"
        },
        {
          "field_a_term": "DDPM noise schedule (beta_1, ..., beta_T)",
          "field_b_term": "Variance schedule g^2(t) of the forward SDE",
          "note": "The discrete noise schedule corresponds to the variance function of the forward SDE; continuous-time SDE formulations (Song et al. 2021) unify DDPM, SMLD, and other discrete schedules as special cases.\n"
        }
      ],
      "references": [
        {
          "doi": "10.48550/arXiv.2011.13456",
          "note": "Song et al. (2021) - score-based generative modeling through SDEs; ICLR 2021 Outstanding Paper"
        },
        {
          "doi": "10.48550/arXiv.2006.11239",
          "note": "Ho et al. (2020) - denoising diffusion probabilistic models; NeurIPS 2020"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-cs/b-diffusion-models-x-stochastic-processes.yaml"
    },
    {
      "id": "b-mean-field-theory-x-neural-networks",
      "title": "Mean Field Theory x Deep Neural Networks - infinite-width limit as Gaussian process\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "In the infinite-width limit, a deep neural network at initialization is exactly a Gaussian process with a kernel determined by the activation function (NNGP kernel); mean field theory of neural networks predicts the edge-of-chaos initialization condition (variance preservation) that enables training",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Statistical physicists developed mean-field theory of disordered systems (Sherrington- Kirkpatrick model, Parisi replica method) in the 1970s-80s; its application to deep learning was made explicit only around 2017 (Poole et al., Lee et al.) despite strong mathematical parallels available for decades.\n",
      "translation_table": [
        {
          "field_a_term": "Pre-activations at layer l (hidden unit values before activation)",
          "field_b_term": "Gaussian random field (mean-field approximation)",
          "note": "As width -> infinity, each pre-activation is a sum of infinitely many independent terms, converging to Gaussian by CLT - the mean-field limit; the resulting distribution is determined solely by the variance propagation equations.\n"
        },
        {
          "field_a_term": "Variance propagation through activation functions",
          "field_b_term": "Mean-field self-consistency equations",
          "note": "The variance of pre-activations propagates through the network via sigma^2_{l+1} = sigma_w^2 * E[phi(z)^2] + sigma_b^2, a mean-field fixed-point equation whose solution determines the initialization distribution.\n"
        },
        {
          "field_a_term": "Edge-of-chaos condition (chi = 1, order-to-chaos transition)",
          "field_b_term": "Critical point of mean-field dynamics",
          "note": "The mean-field order parameter chi (Jacobian eigenvalue) equals 1 at the critical point between ordered (chi < 1, vanishing gradient) and chaotic (chi > 1, exploding gradient) phases; training is possible only at this edge-of-chaos critical point.\n"
        },
        {
          "field_a_term": "NNGP kernel K^l(x,x')",
          "field_b_term": "Gaussian process covariance function",
          "note": "The dot-product kernel computed by the mean-field recursion is the covariance of the infinite-width neural network Gaussian process; it determines the generalization properties without any training.\n"
        }
      ],
      "references": [
        {
          "doi": "10.48550/arXiv.1711.00165",
          "note": "Lee et al. (2018) - deep neural networks as Gaussian processes; ICLR 2018"
        },
        {
          "doi": "10.48550/arXiv.1606.05340",
          "note": "Poole et al. (2016) - exponential expressivity from shallow networks at the edge of chaos"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-cs/b-mean-field-theory-x-neural-networks.yaml"
    },
    {
      "id": "b-quantum-error-correction-x-topological-codes",
      "title": "Quantum error correction x Topological codes — anyons as logical qubits\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Topological quantum error correction (surface codes, toric codes) encodes logical qubits in the global topology of anyon configurations; logical errors require macroscopic anyon movement, making decoherence exponentially suppressed in system size — the same topological protection that makes quantum ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Condensed matter physicists studying fractional quantum Hall states and computer scientists designing fault-tolerant quantum algorithms share mathematics (topological field theory, category theory of anyons) but publish in disjoint journals (Physical Review Letters vs Quantum Information & Computation), causing slow cross-pollination of experimental insights.\n",
      "translation_table": [
        {
          "field_a_term": "anyon braiding (condensed matter physics)",
          "field_b_term": "logical gate on topological qubit (quantum computing)",
          "note": "Non-abelian anyon worldline braids implement fault-tolerant quantum gates without external control pulses"
        },
        {
          "field_a_term": "topological ground state degeneracy (physics)",
          "field_b_term": "logical qubit Hilbert space (quantum computing)",
          "note": "The 2^k-fold degenerate ground state of a toric code encodes k logical qubits protected by the energy gap"
        },
        {
          "field_a_term": "anyon pair creation energy gap Δ (physics)",
          "field_b_term": "code distance d (quantum error correction)",
          "note": "The energy gap suppresses thermal anyon creation; code distance quantifies how many physical errors must occur for a logical error"
        },
        {
          "field_a_term": "quantum Hall edge state (condensed matter)",
          "field_b_term": "topologically protected logical channel (quantum computing)",
          "note": "Both are immune to local perturbations by virtue of topological invariance of the bulk"
        }
      ],
      "references": [
        {
          "doi": "10.1103/RevModPhys.80.1083",
          "note": "Nayak et al. (2008) - Non-abelian anyons and topological quantum computation; Rev Mod Phys 80:1083"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-cs/b-quantum-error-correction-x-topological-codes.yaml"
    },
    {
      "id": "b-quantum-walk-x-classical-random-walk",
      "title": "Quantum Walks x Classical Random Walks — interference as search speedup\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Quantum walks replace classical random walk coin flipping with quantum superposition and interference; the probability distribution spreads ballistically (σ ∝ t) rather than diffusively (σ ∝ √t), providing quadratic speedup for spatial search algorithms and underpinning quantum walk-based quantum co",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Random walk theory (Einstein, Smoluchowski) and quantum mechanics (Feynman path integrals) were developed in separate communities; quantum walks were introduced by Aharonov-Ambainis-Kempe in 2001 as a direct quantum analog of classical random walks, but the algorithmic implications for graph problems were developed independently of the physics literature on quantum diffusion.\n",
      "translation_table": [
        {
          "field_a_term": "Quantum coin operator C (Hadamard)",
          "field_b_term": "Superposition replacement for classical coin flip",
          "note": "The Hadamard gate creates equal superposition of left/right; unlike classical coin flip (entropy increase), the quantum coin is unitary (no entropy increase) and enables interference between left and right amplitudes.\n"
        },
        {
          "field_a_term": "Ballistic spreading σ(t) ∝ t",
          "field_b_term": "Constructive interference in quantum walk",
          "note": "Quantum interference causes the wavefunction to constructively add at the edges of the distribution (rather than the center); the standard deviation grows linearly in t, providing quadratic speedup over classical diffusion (σ ∝ √t).\n"
        },
        {
          "field_a_term": "Quantum walk search on N-vertex graph (O(√N) time)",
          "field_b_term": "Grover's search speedup via quantum walk",
          "note": "Szegedy (2004) showed quantum walk on a bipartite graph achieves quadratic speedup over classical Markov chain mixing; for searching an N-element database, quantum walk requires O(√N) steps vs. O(N) classically.\n"
        },
        {
          "field_a_term": "Decoherence → classical random walk",
          "field_b_term": "Quantum-to-classical transition for walks",
          "note": "Adding environmental decoherence (measurement noise, phase randomization) to a quantum walk continuously interpolates between quantum (ballistic) and classical (diffusive) spreading — providing a quantum-classical crossover model.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevA.48.1687",
          "note": "Aharonov, Ambainis, Kempe & Vazirani (2001) — Quantum walks on graphs; STOC 2001 (see also Phys Rev A 48:1687 for related work)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-cs/b-quantum-walk-x-classical-random-walk.yaml"
    },
    {
      "id": "b-renormalization-group-x-machine-learning",
      "title": "Renormalization Group x Machine Learning — coarse-graining as representation learning\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The renormalization group (RG) flow in statistical physics — iteratively integrating out short-scale degrees of freedom — is mathematically equivalent to the hierarchical feature extraction performed by deep neural networks; both perform optimal lossy compression of information across scales.\n",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Physicists and ML researchers both discovered hierarchical coarse-graining independently; the RG community rarely reads NeurIPS and vice versa, causing 40 years of parallel development.\n",
      "translation_table": [
        {
          "field_a_term": "RG block-spin transformation",
          "field_b_term": "Convolutional pooling layer",
          "note": "Both discard fine-grained detail while preserving long-range correlations; Kadanoff block-spin coarse-graining maps directly onto average-pooling in CNNs.\n"
        },
        {
          "field_a_term": "Fixed point of RG flow",
          "field_b_term": "Learned representation at network depth",
          "note": "Stable fixed points correspond to universality classes; deep network representations converge toward invariant feature manifolds.\n"
        },
        {
          "field_a_term": "Relevant vs irrelevant operators",
          "field_b_term": "High vs low-weight learned features",
          "note": "RG relevance (eigenvalue > 1) corresponds to features that survive compression — analogous to large singular values in learned weight matrices.\n"
        },
        {
          "field_a_term": "Renormalization group flow equations",
          "field_b_term": "Backpropagation gradient flow",
          "note": "Both describe how information propagates across scales; the beta function in RG mirrors the chain rule in backprop.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.1710026115",
          "note": "Mehta & Schwab (2014) — exact mapping between variational RG and restricted Boltzmann machines"
        },
        {
          "doi": "10.1103/PhysRevX.8.031003",
          "note": "Koch-Janusz & Ringel (2018) — mutual information maximization connects RG and representation learning"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-cs/b-renormalization-group-x-machine-learning.yaml"
    },
    {
      "id": "b-renormalization-x-compression",
      "title": "Renormalization x Data Compression - irrelevant operators as redundant bits\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Lossy data compression (JPEG, MP3, rate-distortion theory) and the renormalization group (integrating out short-scale fluctuations) both perform optimal coarse- graining: both discard information that is maximally irrelevant to the macroscopic description; the rate-distortion function R(D) is the in",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Shannon rate-distortion theory (1959) and Wilson's renormalization group (1971) were developed independently in information theory and physics; their deep mathematical similarity was pointed out by various authors but a rigorous quantitative correspondence (mapping rate-distortion to RG beta functions) has only recently been formalized.\n",
      "translation_table": [
        {
          "field_a_term": "RG relevant operator (grows under coarse-graining)",
          "field_b_term": "Low-frequency signal component (survives compression)",
          "note": "Relevant operators in the RG sense have scaling dimension > d and grow under coarse-graining; they correspond to the low-frequency components of a signal that are preserved by lossy compression (JPEG DCT low-frequency coefficients, MP3 critical band energies).\n"
        },
        {
          "field_a_term": "RG irrelevant operator (vanishes under coarse-graining)",
          "field_b_term": "High-frequency signal component (discarded by compression)",
          "note": "Irrelevant operators shrink under coarse-graining and are discarded in the RG flow; they correspond to high-frequency, fine-detail information discarded in lossy compression - perceptually irrelevant to the macroscopic (human-perceivable) representation.\n"
        },
        {
          "field_a_term": "RG fixed point (universal behavior)",
          "field_b_term": "Optimal codebook (rate-distortion optimal code)",
          "note": "The RG fixed point is the universal limiting behavior; the rate-distortion optimal code is the limiting codebook that achieves minimum rate for given distortion - both represent the minimal sufficient description of the system.\n"
        },
        {
          "field_a_term": "Kadanoff block-spin coarse-graining",
          "field_b_term": "Wavelet or DCT transform with thresholding",
          "note": "Block-spin averaging over a lattice is mathematically equivalent to a spatial low-pass filter; DCT/wavelet decomposition plus coefficient thresholding performs the same operation on discrete signals.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1007/s11538-017-0362-y",
          "note": "Koch-Janusz & Ringel (2018) - mutual information, neural networks and the renormalization group; Nature Physics"
        },
        {
          "doi": "10.1109/TIT.1956.1056802",
          "note": "Shannon (1959) - coding theorems for a discrete source with a fidelity criterion; IRE Conv Rec 4:142"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-cs/b-renormalization-x-compression.yaml"
    },
    {
      "id": "b-reservoir-computing-x-dynamical-systems",
      "title": "Reservoir computing ↔ Dynamical systems — echo state networks as kernel machines",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Reservoir computing (echo state networks, liquid state machines) projects input time series through a fixed high-dimensional recurrent network (the reservoir) operating near the edge of chaos; only the readout weights are trained, exploiting the kernel trick in function space — connecting neuromorph",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-reservoir-computing-x-dynamical-systems"
      ],
      "communication_gap": "Echo state networks (Jaeger 2001) and liquid state machines (Maass 2002) were developed simultaneously and independently in neuro-inspired computing, while kernel methods and SVM theory were developed in statistical learning theory. The identification of reservoirs as implicit kernel machines was not formalised until Hermans & Schrauwen (2012), despite the mathematical equivalence being visible in the original formulations.",
      "translation_table": [
        {
          "field_a_term": "reservoir (fixed random recurrent network) in echo state network",
          "field_b_term": "kernel function mapping inputs to high-dimensional feature space",
          "note": "The reservoir implements an implicit kernel: inner product in reservoir state space"
        },
        {
          "field_a_term": "spectral radius ρ(W) of reservoir weight matrix",
          "field_b_term": "proximity to edge of chaos (criticality parameter)",
          "note": "ρ < 1 gives stable (non-chaotic) dynamics; ρ ≈ 0.9 is optimal for memory-nonlinearity trade-off"
        },
        {
          "field_a_term": "echo state property (input history determines current state)",
          "field_b_term": "fading memory property of dynamical systems (recent past dominates)",
          "note": "Echo state property guarantees unique reservoir response for any input history"
        },
        {
          "field_a_term": "readout linear regression (only trained component)",
          "field_b_term": "kernel machine linear classifier on projected features",
          "note": "Training only the linear readout is equivalent to kernel ridge regression"
        }
      ],
      "references": [
        {
          "doi": "10.1162/089976602760407955",
          "note": "Jaeger & Haas (2004) — harnessing nonlinearity with echo state networks; Science 304:78"
        },
        {
          "doi": "10.1126/science.1091277",
          "note": "Maass, Natschläger & Markram (2002) — liquid state machines; Neural Comput 14:2531"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-cs/b-reservoir-computing-x-dynamical-systems.yaml"
    },
    {
      "id": "b-simulated-annealing-x-statistical-mechanics",
      "title": "Simulated annealing x Statistical mechanics — optimization as cooling\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Simulated annealing solves combinatorial optimization by mimicking thermal annealing: accepting uphill moves with probability exp(-delta_E/T) and slowly reducing T; this is exactly the Metropolis-Hastings MCMC algorithm for sampling the Boltzmann distribution, making the convergence guarantee equiva",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Operations researchers developing combinatorial optimization and physicists studying thermal equilibration both work with energy landscapes but rarely collaborated; Kirkpatrick et al. (1983) explicitly imported the Metropolis algorithm from physics, but the quantitative connection to statistical mechanics phase transitions in optimization (e.g., replica method) was developed primarily by physicists.\n",
      "translation_table": [
        {
          "field_a_term": "temperature T in annealing schedule (optimization)",
          "field_b_term": "thermal energy kT in Boltzmann distribution (statistical mechanics)",
          "note": "The annealing schedule T(t) plays the role of physical temperature; slower cooling allows better equilibration at each T"
        },
        {
          "field_a_term": "objective function / cost E(x) (optimization)",
          "field_b_term": "Hamiltonian / energy function H(x) (statistical mechanics)",
          "note": "The cost function is treated as an energy landscape; minimizing cost maps to finding the ground state"
        },
        {
          "field_a_term": "Metropolis acceptance criterion exp(-delta_E/T) (optimization)",
          "field_b_term": "detailed balance in Markov chain for Boltzmann distribution (stat mech)",
          "note": "The Metropolis criterion ensures the Markov chain has the Boltzmann distribution as its stationary distribution"
        },
        {
          "field_a_term": "global minimum of cost function (optimization)",
          "field_b_term": "ground state of physical system (statistical mechanics)",
          "note": "The third law of thermodynamics (entropy -> 0 as T -> 0) guarantees the system reaches the ground state under infinitely slow cooling"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.220.4598.671",
          "note": "Kirkpatrick et al. (1983) - Optimization by simulated annealing; Science 220:671"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-cs/b-simulated-annealing-x-statistical-mechanics.yaml"
    },
    {
      "id": "b-thermodynamics-x-information-theory",
      "title": "Thermodynamics x Information Theory — entropy as the universal currency\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Boltzmann's thermodynamic entropy S = k_B ln Omega and Shannon's information entropy H = -sum p_i log p_i are the same mathematical object; physical heat dissipation and information erasure are two faces of the same phenomenon, unified by Landauer's principle.\n",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Shannon deliberately omitted physical interpretation when naming 'entropy' on von Neumann's advice; this obscured the connection for decades until Landauer and Bennett formalized it in the 1960s-80s.\n",
      "translation_table": [
        {
          "field_a_term": "Thermodynamic entropy (Boltzmann)",
          "field_b_term": "Shannon entropy (information bits)",
          "note": "S/k_B = H * ln 2; the Boltzmann constant is just a unit conversion factor between joules/kelvin and bits.\n"
        },
        {
          "field_a_term": "Landauer erasure (kT ln 2 per bit)",
          "field_b_term": "Minimum heat cost of computation",
          "note": "Every irreversible bit erasure dissipates at least kT ln 2 approximately 2.9 zJ at room temperature — verified experimentally.\n"
        },
        {
          "field_a_term": "Maxwell's demon paradox",
          "field_b_term": "Measurement and feedback control",
          "note": "The demon's memory must be erased, costing energy >= kT ln 2 per bit — information has thermodynamic weight.\n"
        },
        {
          "field_a_term": "Free energy F = U - TS",
          "field_b_term": "Minimum description length (MDL)",
          "note": "Minimizing free energy is equivalent to finding the shortest description of a system — connects thermodynamics to algorithmic information theory.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1147/rd.53.0183",
          "note": "Landauer (1961) — irreversibility and heat generation in the computing process; foundational paper"
        },
        {
          "doi": "10.1103/PhysRevLett.108.120602",
          "note": "Berut et al. (2012) — experimental verification of Landauer's principle at single-bit level"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-cs/b-thermodynamics-x-information-theory.yaml"
    },
    {
      "id": "b-topological-insulator-x-band-theory",
      "title": "Topological Insulators x Band Theory — bulk-boundary correspondence as topological protection\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Topological insulators have conducting surface states protected by time-reversal symmetry that cannot be removed by any perturbation that preserves the symmetry; these states are guaranteed by the bulk topological invariant (Z2 index) via the bulk-boundary correspondence — a mathematical theorem fro",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Condensed matter physicists developed topological band theory through materials intuition (spin-orbit coupling in heavy elements), while mathematicians developed K-theory for vector bundles independently; the connection (Bloch bundle K-theory) was made only in the 2000s despite decades of parallel development.\n",
      "translation_table": [
        {
          "field_a_term": "Z2 topological invariant (bulk)",
          "field_b_term": "Topological index from K-theory",
          "note": "The Z2 index classifies band structures into trivial (0) and topological (1) phases; K-theory provides the mathematical framework showing which invariants are possible in each symmetry class.\n"
        },
        {
          "field_a_term": "Surface conducting states",
          "field_b_term": "Boundary modes of topological band structure",
          "note": "Bulk-boundary correspondence guarantees gapless surface states at the interface between a topological insulator and a trivial insulator (or vacuum).\n"
        },
        {
          "field_a_term": "Time-reversal symmetry protection",
          "field_b_term": "Symmetry class AII in the ten-fold way",
          "note": "The Altland-Zirnbauer classification (ten-fold way) assigns band structures to symmetry classes; class AII (time-reversal with T² = -1) supports Z2 topological invariants in 2D and 3D.\n"
        },
        {
          "field_a_term": "Band gap in bulk",
          "field_b_term": "Spectral gap of Hamiltonian",
          "note": "The bulk band gap is the spectral gap of the Bloch Hamiltonian; topology is only well-defined when this gap is finite, analogous to the energy gap in quantum field theory.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1103/RevModPhys.82.3045",
          "note": "Hasan & Kane (2010) — Colloquium: Topological insulators; Rev Mod Phys 82:3045"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-cs/b-topological-insulator-x-band-theory.yaml"
    },
    {
      "id": "b-variational-inference-x-free-energy",
      "title": "Variational inference x Free energy minimization - Bayesian inference as thermodynamics\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Variational Bayesian inference minimizes the variational free energy F = E[log q] - E[log p] (equivalent to maximizing the ELBO), which is identical to the Helmholtz free energy F = U - TS in statistical mechanics; the approximate posterior q is the Boltzmann distribution at effective temperature se",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Statistical physicists developed free energy minimization as a thermodynamic optimization principle while Bayesian statisticians independently developed variational inference as an approximation to posterior computation; the connection was made explicit by Hinton and Zemel (1994) with Helmholtz machines and later by the free energy principle (Friston 2010), but practical machine learning (VAEs) rarely cites the statistical mechanics literature.\n",
      "translation_table": [
        {
          "field_a_term": "approximate posterior q(z|x) (Bayesian inference)",
          "field_b_term": "Boltzmann equilibrium distribution exp(-E/kT)/Z (statistical mechanics)",
          "note": "The variational posterior minimizing free energy is the Boltzmann distribution; the temperature T is set by prior strength"
        },
        {
          "field_a_term": "KL divergence KL(q||p) (variational inference)",
          "field_b_term": "relative entropy / Kullback-Leibler divergence in non-equilibrium thermodynamics (stat mech)",
          "note": "The KL divergence is the excess free energy above the true minimum; it quantifies distance from Boltzmann equilibrium"
        },
        {
          "field_a_term": "evidence lower bound ELBO = E_q[log p(x,z)] - E_q[log q(z|x)] (ML)",
          "field_b_term": "negative Helmholtz free energy F = -U + TS (thermodynamics)",
          "note": "Maximizing ELBO equals minimizing Helmholtz free energy; the expected log-likelihood maps to negative internal energy U"
        },
        {
          "field_a_term": "variational parameters phi (neural network weights in amortized inference)",
          "field_b_term": "order parameter in Landau theory / control parameter in stat mech (physics)",
          "note": "Optimizing variational parameters corresponds to finding the order parameter that minimizes free energy"
        }
      ],
      "references": [
        {
          "doi": "10.1162/neco.1995.7.5.1022",
          "note": "Hinton & Zemel (1994) - Autoencoders, minimum description length and Helmholtz free energy; NeurIPS 7"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-cs/b-variational-inference-x-free-energy.yaml"
    },
    {
      "id": "b-ecological-stoichiometry-redfield",
      "title": "Redfield ratio C:N:P=106:16:1 ↔ optimality of molecular machines: ocean chemistry as evolved biochemical constraint",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Redfield (1934, 1958) discovered that dissolved inorganic nutrients in the deep ocean maintain a remarkably constant ratio of C:N:P = 106:16:1 (atomic), and that marine phytoplankton cellular composition matches this ratio. This could not be a coincidence: Redfield argued that ocean chemistry is sha",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-redfield-growth-rate-rg-fixed-point"
      ],
      "communication_gap": "Redfield published in the American Scientist (1958), a general audience journal. Sterner & Elser's synthesis (2002 book \"Ecological Stoichiometry\") is well known in ecology but has minimal penetration into biochemistry or evolutionary biology. The connection to the physics of optimal resource allocation and renormalization-group fixed points has not been made in any major review paper, despite the conceptual equivalence being straightforward.\n",
      "translation_table": [
        {
          "field_a_term": "Redfield ratio 106:16:1",
          "field_b_term": "stoichiometry of ribosomes:proteins:structural biomass",
          "note": "RNA (ribosomes) is P-rich ~9% P by mass; proteins N-rich ~16% N"
        },
        {
          "field_a_term": "deep-ocean nutrient homeostasis",
          "field_b_term": "evolutionary attractor of growth-rate optimization",
          "note": "Ocean chemistry is a dynamical fixed point, not a geochemical coincidence"
        },
        {
          "field_a_term": "Growth Rate Hypothesis",
          "field_b_term": "faster growth requires more ribosomes → more P → lower N:P",
          "note": "Predicts deviation from Redfield in fast-growing organisms"
        },
        {
          "field_a_term": "stoichiometric imbalance (food quality)",
          "field_b_term": "nutrient limitation in food webs",
          "note": "Herbivores constrained by mismatch between food C:P and body C:P"
        },
        {
          "field_a_term": "Liebig's law of the minimum",
          "field_b_term": "single limiting nutrient controls growth",
          "note": "N vs P vs Fe limitation of ocean productivity - Redfield sets which"
        }
      ],
      "references": [
        {
          "doi": "10.2307/1622",
          "note": "Redfield (1958) American Scientist - Redfield ratio original statement"
        },
        {
          "doi": "10.1017/CBO9780511612213",
          "note": "Sterner & Elser (2002) - Ecological Stoichiometry textbook"
        },
        {
          "doi": "10.1046/j.1461-0248.2001.00249.x",
          "note": "Elser et al. (2000) Ecology Letters - Growth Rate Hypothesis"
        },
        {
          "doi": "10.1126/science.1227379",
          "note": "Loladze & Elser (2011) Science - why is P-rich RNA ancestrally conserved"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/physics-ecology/b-ecological-stoichiometry-redfield.yaml"
    },
    {
      "id": "b-habitat-percolation-ecology",
      "title": "Habitat fragmentation is a percolation phase transition — species extinction risk collapses discontinuously when connected habitat falls below the percolation threshold, and finite-size scaling predicts exactly how this threshold shifts in landscapes of finite total area.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "In bond/site percolation on a lattice, a giant connected cluster (spanning the system) disappears abruptly below a critical occupancy p_c. In fragmented landscapes, habitat patches connected by dispersal corridors form exactly such a network, and species requiring connected habitat experience a shar",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-habitat-percolation-critical-density"
      ],
      "communication_gap": "Percolation theory has been used in landscape ecology since Gardner et al. (1987), but primarily as a simulation tool rather than an analytical framework. The formal machinery of finite-size scaling, universality classes, and critical exponents from condensed-matter physics has not penetrated conservation biology literature. As a result, the SLOSS debate continues without the rigorous quantitative framework that would resolve it. Conservation biology journals do not typically handle manuscripts using Wilson-Fisher RG, critical exponents, or FSS corrections — even though the underlying biology is exactly described by these tools.\n",
      "translation_table": [
        {
          "field_a_term": "site percolation threshold p_c",
          "field_b_term": "critical habitat coverage fraction (~60%) below which landscape connectivity collapses"
        },
        {
          "field_a_term": "giant connected component",
          "field_b_term": "connected habitat network allowing population rescue by immigration"
        },
        {
          "field_a_term": "finite-size scaling correction to p_c",
          "field_b_term": "threshold shift in smaller landscape patches vs continental-scale habitat"
        },
        {
          "field_a_term": "correlation length xi (diverges at p_c)",
          "field_b_term": "mean dispersal distance required to maintain connectivity near threshold"
        },
        {
          "field_a_term": "cluster size distribution (power law at p_c)",
          "field_b_term": "patch size distribution in fragmented landscapes at the extinction threshold"
        },
        {
          "field_a_term": "universality class (2D percolation, nu=4/3)",
          "field_b_term": "spatial connectivity structure of landscapes where habitat is random vs structured"
        },
        {
          "field_a_term": "bond vs site percolation",
          "field_b_term": "corridor-dependent dispersal (bond) vs patch-quality-dependent dispersal (site)"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF02275266",
          "note": "Gardner et al. (1987) — original application of percolation to landscape ecology"
        },
        {
          "doi": "10.1007/BF02275369",
          "note": "With & Crist (1995) — percolation threshold and animal movement in fragmented landscapes"
        },
        {
          "doi": "10.1023/A:1008917418979",
          "note": "Flather & Bevers (2002) — patch size and connectivity in bird species persistence"
        },
        {
          "arxiv": "cond-mat/9212004",
          "note": "Finite-size scaling in percolation (same toolkit as b-percolation-epidemiology)"
        },
        {
          "doi": "10.1038/nature04317",
          "note": "Hanski (2005) — metapopulation dynamics in fragmented landscapes; implicit percolation structure"
        }
      ],
      "last_reviewed": "2026-05-04",
      "file": "cross-domain/physics-ecology/b-habitat-percolation-ecology.yaml"
    },
    {
      "id": "b-maximum-entropy-ecology",
      "title": "Jaynes's maximum-entropy (MaxEnt) principle from statistical mechanics — applied with macroecological state variables as constraints — predicts species abundance distributions, species-area relationships, and metabolic scaling in ecological communities with no free parameters, demonstrating that biodiversity patterns emerge from information-theoretic constraints rather than species-specific biology.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Jaynes (1957) showed that the Boltzmann-Gibbs distribution is the unique probability distribution that maximizes Shannon entropy subject to known macroscopic constraints (e.g. fixed mean energy). Harte et al. (2008, 2011) applied this to ecology: given the constraints of total species richness S, to",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-mete-non-equilibrium-deviations"
      ],
      "communication_gap": "Macroecology has been dominated by mechanistic debates (niche vs. neutral theory) that make species-specific assumptions. Jaynes's MaxEnt formalism is standard in physics and machine learning but largely unfamiliar to ecologists. Harte's METE group (UC Berkeley) has applied it rigorously, but adoption in mainstream ecology journals is slow because the information-theoretic framing differs fundamentally from the mechanistic tradition. Physicists who know MaxEnt rarely study ecology; ecologists rarely read Jaynes.\n",
      "translation_table": [
        {
          "field_a_term": "Boltzmann distribution (maximizes entropy at fixed mean energy)",
          "field_b_term": "log-series species abundance distribution (maximizes entropy at fixed N, S)"
        },
        {
          "field_a_term": "macroscopic constraint (mean energy <E>)",
          "field_b_term": "macroecological state variable (N, S, E — abundance, richness, metabolic rate)"
        },
        {
          "field_a_term": "partition function Z (Boltzmann)",
          "field_b_term": "Lagrange multiplier structure in METE (lambda_1, lambda_2)"
        },
        {
          "field_a_term": "equation of state (thermodynamic identity)",
          "field_b_term": "METE's constraint equations linking S, N, E to SAD shape"
        },
        {
          "field_a_term": "deviation from MaxEnt = non-equilibrium / additional information",
          "field_b_term": "deviation from METE prediction = ecological mechanism (invasions, disturbance)"
        },
        {
          "field_a_term": "microstate (energy distribution over molecules)",
          "field_b_term": "abundance distribution over individuals across species"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRev.106.620",
          "note": "Jaynes (1957) - maximum entropy and statistical mechanics"
        },
        {
          "doi": "10.1111/j.1461-0248.2008.01201.x",
          "note": "Harte et al. (2008) - maximum entropy and the state-variable approach to macroecology"
        },
        {
          "doi": "10.1111/j.1461-0248.2009.01309.x",
          "note": "Harte et al. (2009) - METE predictions of species abundance distributions"
        },
        {
          "url": "https://global.oup.com/academic/product/maximum-entropy-and-ecology-9780199593415",
          "note": "Harte (2011) - Maximum Entropy and Ecology (Oxford University Press)"
        },
        {
          "doi": "10.1126/science.284.5414.550",
          "note": "Hubbell (2001) - Neutral theory of biodiversity (comparison baseline)"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/physics-ecology/b-maximum-entropy-ecology.yaml"
    },
    {
      "id": "b-turing-patterns-ecosystem-tipping",
      "title": "Turing vegetation patterns as early-warning signals for catastrophic ecosystem collapse",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "In dryland ecosystems, plant biomass and water interact as activator-inhibitor pairs that satisfy the Turing reaction-diffusion conditions (Klausmeier 1999). At intermediate rainfall, vegetation self-organises into regular spatial patterns — stripes, spots, or gaps — whose geometry is determined by ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Turing (1952) was a mathematician writing in Phil Trans Royal Society. Klausmeier (1999) was an ecologist writing in Science. Rietkerk et al. (2004) was published in Science. But the explicit connection between the Turing mechanism, percolation, and SOC was not made in a single accessible paper until around 2010s. Remote sensing ecologists who monitor vegetation patterns do not routinely use Fourier-spectrum Turing diagnostics; they use NDVI thresholds. The mathematical toolkit from pattern formation physics has not been standardised in ecological monitoring practice.\n",
      "translation_table": [
        {
          "field_a_term": "Activator (vegetation)",
          "field_b_term": "Species biomass / canopy cover",
          "note": "Positive feedback: more vegetation retains water, enabling more growth"
        },
        {
          "field_a_term": "Inhibitor (soil water)",
          "field_b_term": "Water table / soil moisture",
          "note": "Vegetation depletes local water, creating the differential diffusion condition"
        },
        {
          "field_a_term": "Turing wavelength Lambda*",
          "field_b_term": "Characteristic patch spacing (measurable via satellite)",
          "note": "Shift in Lambda* towards shorter wavelengths precedes collapse"
        },
        {
          "field_a_term": "Turing bifurcation (d_critical)",
          "field_b_term": "Rainfall threshold below which patterns appear",
          "note": "Below this rainfall, uniform vegetation is unstable"
        },
        {
          "field_a_term": "SOC / percolation threshold p_c",
          "field_b_term": "Minimum connected vegetated fraction for ecosystem function",
          "note": "When the spot pattern fragments below p_c, the ecosystem loses connectivity"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1116505",
          "note": "Rietkerk et al. (2004) Science — self-organisation and tipping points in dryland ecosystems. Seeded from arXiv nlin:AO harvest 2026-05-04: Vegetation Patterning Can Both Impede and Trigger Critical Transitions from Savanna to Grassland"
        },
        {
          "doi": "10.1086/303135",
          "note": "Klausmeier (1999) Science — regular and irregular patterns in semiarid vegetation"
        },
        {
          "doi": "10.1098/rstb.1952.0012",
          "note": "Turing (1952) — The chemical basis of morphogenesis"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/physics-ecology/b-turing-patterns-ecosystem-tipping.yaml"
    },
    {
      "id": "b-agent-based-models-x-emergent-markets",
      "title": "Agent-Based Models x Market Dynamics - heterogeneous agents as interacting particles\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Agent-based financial market models treat traders as heterogeneous interacting agents with bounded rationality; fat-tailed return distributions, volatility clustering, and market crashes emerge without being programmed - as collective phenomena analogous to phase transitions in heterogeneous spin sy",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Classical financial economics (Black-Scholes, EMH) assumes homogeneous rational agents; econophysicists (Mantegna, Stanley) imported spin system physics to financial markets in the 1990s; mainstream economics has been slow to adopt agent-based models despite their empirical success at reproducing stylized facts.\n",
      "translation_table": [
        {
          "field_a_term": "Trader (fundamentalist vs trend-follower)",
          "field_b_term": "Spin with heterogeneous coupling (Ising-like)",
          "note": "Fundamentalist traders (buy when price < value) and trend-following traders (buy when price rises) correspond to competing interactions; their relative abundance shifts the market between efficient (disordered) and trending (ordered) phases.\n"
        },
        {
          "field_a_term": "Market crash (sudden collective price drop)",
          "field_b_term": "First-order phase transition (spinodal decomposition)",
          "note": "Market crashes occur when trend-follower density crosses a critical threshold, triggering a first-order-like transition from a high-price metastable state to a low-price state - analogous to spinodal decomposition in a supersaturated liquid.\n"
        },
        {
          "field_a_term": "Fat-tailed return distribution (excess kurtosis)",
          "field_b_term": "Power-law tail of correlated random walk (Levy distribution)",
          "note": "Trader interaction (herding) produces correlated returns with power-law tails (Pareto exponent ~3); this matches empirical stock return distributions and is explained by the collective dynamics of interacting agents near a critical point.\n"
        },
        {
          "field_a_term": "Volatility clustering (GARCH effects)",
          "field_b_term": "Long-range temporal correlations near criticality",
          "note": "Volatility correlation (large moves follow large moves) arises from the slow relaxation of the agent opinion distribution near the critical point, analogous to critical slowing down in equilibrium phase transitions.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1038/17616",
          "note": "Lux & Marchesi (1999) - scaling and criticality in a stochastic multi-agent model of a financial market; Nature 397:498"
        },
        {
          "doi": "10.1016/0167-2789(96)00094-8",
          "note": "Kauffman (1995) - LLS model: fundamentalist-chartist dynamics; Physica D"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-economics/b-agent-based-models-x-emergent-markets.yaml"
    },
    {
      "id": "b-blackscholes-x-diffusion-equation",
      "title": "Black-Scholes x Heat diffusion equation — option pricing as Brownian motion\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The Black-Scholes partial differential equation for option pricing is mathematically identical to the heat diffusion equation after a change of variables; option price maps to temperature, log-price maps to position, and volatility squared maps to thermal diffusivity — enabling physicists' Green's f",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Financial economists derived Black-Scholes in 1973 using Ito calculus without recognizing the heat equation equivalence; physicists independently applied diffusion theory to finance (econophysics) from the 1990s, creating a second derivation tradition that financial professionals were slow to adopt despite its computational advantages.\n",
      "translation_table": [
        {
          "field_a_term": "option price C(S,t) (finance)",
          "field_b_term": "temperature field T(x,t) (heat equation)",
          "note": "After the substitution tau = T-t, x = ln(S/K), the Black-Scholes PDE becomes the standard heat equation"
        },
        {
          "field_a_term": "volatility sigma (finance)",
          "field_b_term": "sqrt(2 * thermal diffusivity) (physics)",
          "note": "Volatility sets the rate at which the probability distribution of log-prices spreads, exactly as thermal diffusivity spreads heat"
        },
        {
          "field_a_term": "risk-neutral pricing measure Q (finance)",
          "field_b_term": "Feynman-Kac path integral (physics)",
          "note": "The risk-neutral expectation of discounted payoff is the path-integral solution to the heat equation with appropriate boundary conditions"
        },
        {
          "field_a_term": "Black-Scholes delta hedge (finance)",
          "field_b_term": "gradient of temperature field (physics)",
          "note": "The delta (dC/dS) plays the role of the temperature gradient in the heat equation formulation"
        }
      ],
      "references": [
        {
          "doi": "10.1086/260062",
          "note": "Black & Scholes (1973) - The pricing of options and corporate liabilities; J Political Economy 81:637"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-economics/b-blackscholes-x-diffusion-equation.yaml"
    },
    {
      "id": "b-chemical-potential-utility-maximization",
      "title": "Chemical potential equalization at thermodynamic equilibrium is formally identical to marginal utility equalization in consumer utility maximization: both are gradient-descent conditions on the same class of strictly convex potential function, uniting thermodynamics and neoclassical economics through the mathematics of convex optimization\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "At thermodynamic equilibrium, the chemical potential μᵢ = (∂G/∂nᵢ)_{T,P} equalizes across all coexisting phases (μᵢᵅ = μᵢᵝ), minimizing the Gibbs free energy G(T,P,{nᵢ}); in consumer theory, utility maximization subject to a budget constraint requires equalizing marginal utilities per dollar MUᵢ/pᵢ ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "The formal mathematical equivalence between thermodynamic and economic equilibrium was recognized by Samuelson (1947) and explored by Georgescu-Roegen (1971), but remains outside standard training in both thermodynamics and economics curricula. Thermodynamicists rarely study microeconomic theory; economists rarely encounter the Gibbs-Duhem equation. The connection is treated as analogy in interdisciplinary texts rather than as a structural identity with predictive implications.\n",
      "translation_table": [
        {
          "field_a_term": "chemical potential μᵢ = ∂G/∂nᵢ (thermodynamics)",
          "field_b_term": "marginal utility per dollar MUᵢ/pᵢ (economics)",
          "note": "Both measure the incremental gain in the objective (G or -U) from adding one unit of component i; equalization across phases/goods is the equilibrium condition in both frameworks"
        },
        {
          "field_a_term": "Gibbs free energy G(T,P,{nᵢ}) minimization (thermodynamics)",
          "field_b_term": "utility U({xᵢ}) maximization subject to budget constraint (economics)",
          "note": "Both are convex optimization problems on the same mathematical class; the Legendre transform relates thermodynamic potentials just as expenditure and indirect utility functions are Legendre duals"
        },
        {
          "field_a_term": "phase coexistence — phases α and β at equilibrium (thermodynamics)",
          "field_b_term": "market equilibrium — supply equals demand at price vector p* (economics)",
          "note": "Phase coexistence requires μᵢᵅ = μᵢᵝ for all species i; Walrasian equilibrium requires MUᵢ/pᵢ = λ for all goods i; both enforce the same equimarginal principle"
        },
        {
          "field_a_term": "activity coefficient γᵢ correcting for non-ideal mixing (thermodynamics)",
          "field_b_term": "price distortion or externality shifting marginal cost from price (economics)",
          "note": "Activity coefficients modify ideal chemical potential just as taxes/subsidies modify the effective price signal in utility maximization"
        },
        {
          "field_a_term": "Le Chatelier's principle — system resists perturbation by shifting equilibrium (thermodynamics)",
          "field_b_term": "substitution effect — consumers substitute away from price-increased goods (economics)",
          "note": "Both are second-order stability conditions on the convex potential; Slutsky matrix negative semi-definiteness is the economic analog of thermodynamic stability inequalities"
        }
      ],
      "references": [
        {
          "doi": "10.2307/1907385",
          "note": "Samuelson (1960) — Structure of a minimum equilibrium system; formal thermodynamic analogy in economics"
        },
        {
          "note": "Georgescu-Roegen (1971) — The Entropy Law and the Economic Process; Harvard University Press (book, no DOI); thermodynamics–economics duality"
        },
        {
          "doi": "10.1016/j.physa.2007.01.042",
          "note": "Mimkes (2006) — A thermodynamic formulation of economics; chemical potential as marginal utility with derivations"
        },
        {
          "doi": "10.1016/S0378-4371(99)00286-4",
          "note": "Foley (1994) — A statistical equilibrium theory of markets; derives economic equilibrium from maximum entropy principle"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-economics/b-chemical-potential-utility-maximization.yaml"
    },
    {
      "id": "b-entropy-maximization-x-income-distribution",
      "title": "Maximum entropy x Income distribution - Boltzmann-Gibbs distribution of wealth\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The equilibrium income distribution in a closed economy with random pairwise wealth exchanges follows the Boltzmann-Gibbs exponential distribution — the same maximum entropy distribution as particle energies in an ideal gas at thermal equilibrium; deviations (Pareto power-law tails for the wealthy) ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Economists studying income distribution (Pareto 1897, Gibrat 1931) and statistical physicists studying maximum entropy distributions worked independently; Dragulescu and Yakovenko (2001) demonstrated the ideal gas analogy for income distributions, launching econophysics as a field — but mainstream economics resists the approach, arguing that agents have preferences and strategic behavior rather than random collision dynamics, leaving the validity boundary unresolved.\n",
      "translation_table": [
        {
          "field_a_term": "income distribution P(m) in market economy (economics)",
          "field_b_term": "Maxwell-Boltzmann energy distribution P(E) in ideal gas (statistical mechanics)",
          "note": "For random exchange models, P(m) = (1/T) exp(-m/T) where T is mean income; identical to ideal gas energy distribution"
        },
        {
          "field_a_term": "pairwise wealth transfer in market transaction (economics)",
          "field_b_term": "binary elastic collision conserving total energy (statistical mechanics)",
          "note": "Random money exchange (Dragulescu-Yakovenko) maps to elastic collisions; money-conservation maps to energy-conservation"
        },
        {
          "field_a_term": "Gini coefficient measuring income inequality (economics)",
          "field_b_term": "entropy deficit from maximum entropy Boltzmann distribution (stat mech)",
          "note": "The Gini coefficient for Boltzmann distribution = 0.5; deviations above 0.5 indicate wealth condensation analogous to BEC"
        },
        {
          "field_a_term": "Pareto power-law tail (top 1% wealth) (economics)",
          "field_b_term": "Bose-Einstein condensation in wealth distribution models (statistical mechanics)",
          "note": "Saving propensity lambda above critical value causes power-law tail; condensate fraction = (lambda - lambda_c)/lambda"
        }
      ],
      "references": [
        {
          "doi": "10.1140/epjb/e2002-00346-7",
          "note": "Dragulescu & Yakovenko (2001) - Evidence for the exponential distribution of income in the USA; Eur Phys J B 20:585"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-economics/b-entropy-maximization-x-income-distribution.yaml"
    },
    {
      "id": "b-entropy-production-market-irreversibility",
      "title": "Non-equilibrium statistical mechanics ↔ financial market irreversibility — entropy production in price dynamics",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Financial markets are fundamentally irreversible dynamical systems: transaction costs, bid-ask spreads, market impact, and information asymmetry make price dynamics time-asymmetric — the statistical distribution of forward price paths differs from that of time-reversed paths. This irreversibility is",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-arbitrage-as-entropy-decreasing-fluctuation"
      ],
      "communication_gap": "Non-equilibrium statistical mechanics (Jarzynski, Crooks, Seifert) is a rapidly developing physics field largely unknown to financial economists. Econophysics researchers (Mantegna, Stanley, Sornette) have made the equilibrium statistical mechanics analogy (partition function ↔ option price) well-known, but the non-equilibrium extension — where irreversibility is the central concept — remains a frontier. The Malkiel efficient market hypothesis is a verbal equilibrium claim; the non-equilibrium framework provides a quantitative test of departures from it.\n",
      "translation_table": [
        {
          "field_a_term": "entropy production rate dS/dt > 0",
          "field_b_term": "market irreversibility / time-asymmetry of returns",
          "note": "Measured via KL divergence between forward and time-reversed return distributions"
        },
        {
          "field_a_term": "Kullback-Leibler divergence D_KL(P_fwd||P_rev)",
          "field_b_term": "statistical arrow of time in price dynamics",
          "note": "Patzelt & Pawelzik (2013) measured this in forex markets; found significant irreversibility"
        },
        {
          "field_a_term": "Crooks fluctuation theorem P(-ΔS)/P(+ΔS) = e^{-ΔS}",
          "field_b_term": "arbitrage probability decays exponentially with profit size",
          "note": "Large apparent arbitrage opportunities are exponentially rare — efficient market as thermodynamic law"
        },
        {
          "field_a_term": "Jarzynski equality ⟨e^{-W/kT}⟩ = e^{-ΔF/kT}",
          "field_b_term": "risk-neutral pricing of derivatives (no-arbitrage condition)",
          "note": "Both are exact equalities relating non-equilibrium averages to equilibrium quantities"
        },
        {
          "field_a_term": "free energy F = U − TS (Helmholtz)",
          "field_b_term": "risk-adjusted price (price minus risk premium)",
          "note": "Proposed analogy: free energy as fair price; entropy as risk; internal energy as expected payoff"
        },
        {
          "field_a_term": "equilibrium thermal fluctuations (kT scale)",
          "field_b_term": "market microstructure noise (tick size scale)",
          "note": "Both set the floor for irreducible fluctuations in their respective systems"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevE.60.2721",
          "note": "Crooks (1999) Phys Rev E 60:2721 — fluctuation theorem"
        },
        {
          "doi": "10.1103/PhysRevLett.78.2690",
          "note": "Jarzynski (1997) Phys Rev Lett 78:2690 — non-equilibrium work relation"
        },
        {
          "doi": "10.1257/089533003321164958",
          "note": "Malkiel (2003) J Econ Perspect 17:59 — efficient market hypothesis survey"
        },
        {
          "doi": "10.1371/journal.pone.0054250",
          "note": "Patzelt & Pawelzik (2013) PLOS ONE 8:e54250 — empirical irreversibility in financial markets"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-economics/b-entropy-production-market-irreversibility.yaml"
    },
    {
      "id": "b-green-kubo-correlations-x-return-volatility-memory",
      "title": "Green–Kubo fluctuation–dissipation links between equilibrium time correlations and transport coefficients ↔ autocorrelation structure of returns and volatility clustering in market microstructure (statistical physics ↔ finance; partly speculative)\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Green–Kubo relations express transport coefficients as integrals of equilibrium current–current correlators. Empirical finance documents long-memory and clustering in absolute returns, motivating loose parallels to correlation integrals and effective “response” functions. **Speculative layer:** mark",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-volatility-autocorrelation-satisfies-effective-fd-response"
      ],
      "communication_gap": "Physicists assume detailed balance regimes; markets exhibit strategic feedback and regime shifts. Joint papers exist in econophysics but mainstream finance and mathematical physics still talk past each other on identifiability.\n",
      "translation_table": [
        {
          "field_a_term": "equilibrium time-correlation function <J(t)J(0)>",
          "field_b_term": "return autocovariance function (possibly absolute returns)",
          "note": "Analogy of correlation integral structure, not equality of observables."
        },
        {
          "field_a_term": "transport coefficient from integrated correlator",
          "field_b_term": "realized variance integrated over horizons (rough analogy)",
          "note": "Risk horizons echo correlation time integrals only heuristically."
        },
        {
          "field_a_term": "fluctuation–dissipation temperature",
          "field_b_term": "effective noise intensity in stochastic volatility models",
          "note": "Parameter identification is not a physical temperature."
        }
      ],
      "references": [
        {
          "doi": "10.1143/JPSJ.12.570",
          "note": "Kubo (1957) — fluctuation–dissipation theorem (statistical mechanics anchor)."
        },
        {
          "doi": "10.1038/35102715",
          "note": "Mantegna & Stanley (1995) — scaling behaviour of economic dynamics (Nature)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-economics/b-green-kubo-correlations-x-return-volatility-memory.yaml"
    },
    {
      "id": "b-kinetic-theory-wealth-distribution",
      "title": "Kinetic theory of gases and wealth distribution — random pairwise energy/wealth exchange produces exponential (Boltzmann-Gibbs) equilibrium distributions in both gases and simplified economies",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The Boltzmann-Gibbs distribution of kinetic energy in ideal gases maps onto wealth distributions in simplified random exchange models. In a gas, molecules exchange energy randomly in two-body collisions conserving total energy — equilibrium P(E) ∝ exp(-E/kT). In a simplified economy where N agents r",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-two-class-economy-boltzmann-pareto-transition"
      ],
      "communication_gap": "Mainstream economics dismissed early statistical physics approaches to wealth distribution (econophysics) as oversimplified. The field of econophysics (Mantegna, Stanley, Yakovenko) remains marginal in economics departments despite successful empirical predictions. Economists prefer agent-based models with rationality assumptions; physicists prefer minimal models with emergent distributions. The connection between Pareto's original power law observation and kinetic theory was not made until the late 1990s.\n",
      "translation_table": [
        {
          "field_a_term": "kinetic energy E of gas molecule",
          "field_b_term": "wealth W_i of economic agent",
          "note": "Both are non-negative conserved quantities exchanged pairwise"
        },
        {
          "field_a_term": "temperature kT = mean kinetic energy",
          "field_b_term": "mean wealth ⟨W⟩ (economic temperature)",
          "note": "Both set the scale of the equilibrium distribution; inequality ∝ 1/kT"
        },
        {
          "field_a_term": "elastic collision (energy conserving)",
          "field_b_term": "fair exchange (wealth conserving transaction)",
          "note": "Simplest model; real transactions have friction, information asymmetry"
        },
        {
          "field_a_term": "Maxwell-Boltzmann distribution P(v)",
          "field_b_term": "exponential income distribution (lower tail)",
          "note": "Observed in lower 90% of US income distribution by Dragulescu & Yakovenko"
        },
        {
          "field_a_term": "multiplicative stochastic process (geometric Brownian motion)",
          "field_b_term": "investment returns (compounding wealth)",
          "note": "Produces log-normal or power-law distribution; Pareto tail from asset ownership"
        },
        {
          "field_a_term": "entropy maximization (Boltzmann's H-theorem)",
          "field_b_term": "maximum entropy principle for wealth distribution",
          "note": "Both derive the equilibrium distribution from entropy maximization with constraint"
        }
      ],
      "references": [
        {
          "doi": "10.1007/s100510050292",
          "note": "Dragulescu & Yakovenko (2000) Eur Phys J B 17:723 — Boltzmann-Gibbs distribution of money"
        },
        {
          "doi": "10.1007/s100510050276",
          "note": "Chakraborti & Chakrabarti (2000) Eur Phys J B 17:167 — statistical mechanics of money"
        },
        {
          "note": "Pareto (1897) Cours d'économie politique — original power-law wealth distribution observation"
        },
        {
          "doi": "10.1016/S0378-4371(00)00205-3",
          "note": "Bouchaud & Mézard (2000) Physica A 282:536 — wealth condensation in simple models"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-economics/b-kinetic-theory-wealth-distribution.yaml"
    },
    {
      "id": "b-lyapunov-divergence-x-bank-run-amplification",
      "title": "Positive Lyapunov exponents and finite-time divergence in dynamical systems ↔ feedback amplification and panic acceleration in bank-run models (dynamical systems ↔ economics; heavy caveats)\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Classical bank-run models (Diamond–Dybvig style) and their modern network extensions can exhibit multiple equilibria and sharp transitions when beliefs or liquidity shocks cross thresholds. Nearby trajectories in stylized belief–withdrawal dynamics can separate rapidly, echoing finite-time sensitivi",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-bank-run-lyapunov-time-shrinks-with-public-information-leaks"
      ],
      "communication_gap": "Heavy caveats. Economists emphasize beliefs, institutions, and game theory; physicists emphasize attractors and ergodicity assumptions that rarely hold in social data. Cross-talk risks sounding glib without institutional detail.\n",
      "translation_table": [
        {
          "field_a_term": "Lyapunov exponent lambda > 0",
          "field_b_term": "rapid amplification of withdrawal rates after a credibility shock",
          "note": "Metaphorical unless a validated low-dimensional reduction exists."
        },
        {
          "field_a_term": "basin of attraction",
          "field_b_term": "self-fulfilling run equilibrium region",
          "note": "Multiple equilibria complicate direct dynamical-systems language."
        },
        {
          "field_a_term": "bifurcation parameter crossing",
          "field_b_term": "policy threshold where insurance credibility flips",
          "note": "Structural breaks dominate smooth chaos pictures."
        }
      ],
      "references": [
        {
          "doi": "10.2307/1884828",
          "note": "Diamond & Dybvig (1983) — bank runs, deposit insurance, and liquidity (JPE)."
        },
        {
          "doi": "10.1038/35102715",
          "note": "Mantegna & Stanley (1995) — scaling behaviour in economic dynamics (Nature)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-economics/b-lyapunov-divergence-x-bank-run-amplification.yaml"
    },
    {
      "id": "b-minority-game-market-microstructure",
      "title": "The minority game (Challet–Zhang) is an exactly solvable model of financial market competition whose phase transition at critical ratio α_c = P/N reproduces the efficient market boundary — spin glass theory via the replica method provides the analytic solution.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The minority game (Challet & Zhang 1997): N agents repeatedly choose between two options (buy/sell); agents in the minority win — capturing the essence of financial competition: if everyone does the same thing, nobody profits. The key parameter is α = P/N where P = 2^m is the number of distinct mark",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-minority-game-hft-phase-transition"
      ],
      "communication_gap": "Statistical physicists (Challet, Zhang, Coolen) published minority game results in physics journals (Physica A, PRL) with replica method notation unfamiliar to economists. Economists familiar with game theory and market microstructure rarely read Physica A. The finance literature on market efficiency remained largely unaware of the exact phase transition result for two decades, despite its direct relevance to the EMH debate.\n",
      "translation_table": [
        {
          "field_a_term": "spin glass (disordered magnetic system with quenched disorder)",
          "field_b_term": "financial market with heterogeneous strategy-holding agents",
          "note": "quenched disorder = irreversible strategy assignments at game start"
        },
        {
          "field_a_term": "replica method (spin glass analytic solution)",
          "field_b_term": "exact solution of minority game equilibrium statistics",
          "note": "replica trick averages over strategy disorder to compute global market statistics"
        },
        {
          "field_a_term": "paramagnetic phase (disordered spins, T > T_c)",
          "field_b_term": "efficient market phase (random fluctuations, no exploitable patterns)"
        },
        {
          "field_a_term": "spin glass phase (frozen disorder, T < T_c)",
          "field_b_term": "herding/trend phase (persistent patterns, exploitable inefficiency)"
        },
        {
          "field_a_term": "critical temperature T_c",
          "field_b_term": "α_c = P/N (strategies per agent) — EMH boundary"
        },
        {
          "field_a_term": "volatility in spin glass phase",
          "field_b_term": "excess volatility in financial markets (volatility puzzle)"
        },
        {
          "field_a_term": "frustration (competing interactions in spin glass)",
          "field_b_term": "conflicting strategies among agents — no stable equilibrium"
        }
      ],
      "references": [
        {
          "doi": "10.1016/S0378-4371(97)00419-6",
          "note": "Challet & Zhang (1997) Emergence of cooperation and organization in an evolutionary game. Physica A 246:407"
        },
        {
          "doi": "10.1103/PhysRevLett.82.2203",
          "note": "Savit et al. (1999) Minority games and the structure of complex information. Phys Rev Lett 82:2203"
        },
        {
          "note": "Coolen (2005) The Mathematical Theory of Minority Games. Oxford University Press"
        },
        {
          "note": "Challet, Marsili & Zhang (2004) Minority Games. Oxford University Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-economics/b-minority-game-market-microstructure.yaml"
    },
    {
      "id": "b-minority-game-x-market-microstructure",
      "title": "Minority game ↔ Market microstructure — agent heterogeneity as market efficiency",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The minority game (Challet & Zhang 1997) — where agents must independently choose the minority side to win — produces a phase transition between efficient (random) and inefficient (exploitable) markets as a function of agent memory; this maps directly to market microstructure theory of informed vs n",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-minority-game-x-market-microstructure"
      ],
      "communication_gap": "The minority game was invented by physicists (Challet & Zhang 1997, Physica A) and analysed using spin glass theory and replica methods — methods entirely foreign to financial economists. Market microstructure theory (Kyle 1985, Glosten-Milgrom 1985) uses game theory and information economics. The two communities independently developed nearly identical models of informed-vs-noise-trader markets without cross-citation, despite the minority game being an exact mathematical model of the Kyle framework.",
      "translation_table": [
        {
          "field_a_term": "minority game memory m (number of past outcomes agents remember)",
          "field_b_term": "market information ratio α = P/N (prediction resources per agent)",
          "note": "Phase transition at α_c ≈ 0.34; below α_c: inefficient (exploitable); above: efficient (random)"
        },
        {
          "field_a_term": "minority game phase transition (from exploitable to random phase)",
          "field_b_term": "EMH strong-to-weak efficiency transition in market microstructure",
          "note": "Below threshold: price predictability exists (technical analysis profitable); above: random walk"
        },
        {
          "field_a_term": "minority game strategy (mapping from history to binary action)",
          "field_b_term": "trading rule / technical analysis indicator in market microstructure",
          "note": "Agents with more strategies correspond to informed traders with better prediction capacity"
        },
        {
          "field_a_term": "attendance fluctuation σ² in minority game (volatility proxy)",
          "field_b_term": "market price volatility (bid-ask spread, trading volume fluctuation)",
          "note": "Volatility minimum at phase transition point α_c — maximum market efficiency corresponds to minimum exploitability"
        }
      ],
      "references": [
        {
          "doi": "10.1016/S0378-4371(97)00419-6",
          "note": "Challet & Zhang (1997) — emergence of cooperation and organisation in an evolutionary game; Physica A 246:407"
        },
        {
          "doi": "10.2307/1833139",
          "note": "Kyle (1985) — continuous auctions and insider trading; Econometrica 53:1315"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-economics/b-minority-game-x-market-microstructure.yaml"
    },
    {
      "id": "b-rational-inattention-x-entropy",
      "title": "Rational Inattention x Shannon Entropy - cognitive bandwidth as information cost\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Sims' rational inattention model formalizes attention as a scarce cognitive resource with Shannon mutual information as the cost; optimal attention allocation under entropy cost produces price stickiness, infrequent adjustment, and lumpy consumption - predictions that match macroeconomic data better",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Shannon information theory and macroeconomics have largely developed in isolation; Sims (2003) introduced mutual information cost to economics, but most macro models still use rational expectations with full information, ignoring information acquisition costs that are central to signal processing theory.\n",
      "translation_table": [
        {
          "field_a_term": "Cognitive bandwidth (attention capacity)",
          "field_b_term": "Channel capacity C (bits per unit time)",
          "note": "An agent can process at most C bits per unit time about the state of the world; this cap on information processing is identical to Shannon's channel capacity limit, making attention a scarce information-theoretic resource.\n"
        },
        {
          "field_a_term": "Prior distribution over economic states",
          "field_b_term": "Prior distribution at channel input",
          "note": "The agent's prior over wages, prices, productivity is the channel input distribution; Bayesian updating under capacity constraint gives the posterior, which is capacity-achieving only for specific input distributions.\n"
        },
        {
          "field_a_term": "Endogenous inattention (ignoring small price changes)",
          "field_b_term": "Quantization noise / coarse-grained channel output",
          "note": "Under binding capacity constraints, the agent optimally ignores small signals (below the quantization threshold) and responds only to large changes - producing the lumpy adjustment and price stickiness observed in micro data.\n"
        },
        {
          "field_a_term": "Optimal attention allocation across goods",
          "field_b_term": "Water-filling power allocation across parallel channels",
          "note": "The Shannon water-filling theorem describes optimal power allocation to parallel channels; rational inattention implies the analogous attention allocation across different economic variables, concentrating attention on highest-variance goods.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1016/j.jmoneco.2003.06.002",
          "note": "Sims (2003) - implications of rational inattention; J Monetary Economics 50:665"
        },
        {
          "doi": "10.1016/j.jmoneco.2010.05.006",
          "note": "Mackowiak & Wiederholt (2009) - optimal sticky prices under rational inattention"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-economics/b-rational-inattention-x-entropy.yaml"
    },
    {
      "id": "b-schawlow-townes-linewidth-x-leeson-oscillator-phase-noise",
      "title": "Laser cavity linewidth obeys Schawlow–Townes quantum-limited scaling tying linewidth to cavity lifetime and photon number — electronic oscillators exhibit phase-noise spectra shaped by device noise floors plus feedback-loop filtering often summarized by Leeson’s heuristic spectrum with corner frequencies — bridges quantum optics linewidth budgets with RF/microwave PLL spectral purity engineering.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Below saturation, laser linewidth Δν_ST scales as inverse cavity photon number times cavity loss rate — phase-locked loops and crystal oscillators display 1/f³, 1/f², 1/f slope segments where feedback bandwidth sets apparent linewidth corners analogous (operationally) to cavity filtering — distincti",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-identical-analyzer-method-noise-floor-dominated-regimes-match-at-mm-wave-carriers"
      ],
      "communication_gap": "Photonics journals cite Henry factor enhancements while microwave CAD emphasizes phase-noise simulation — unified seminars rare despite identical spectrum analyzer measurements.\n",
      "translation_table": [
        {
          "field_a_term": "Schawlow–Townes linewidth Δν_ST ∝ 1/(n_ph cavity photons)",
          "field_b_term": "Phase-noise pedestal scaling with carrier offset frequency in PLL loops",
          "note": "Same spectral purity obsession — underlying physics differs (spontaneous emission vs flicker/additive noise)."
        },
        {
          "field_a_term": "Cavity Q / photon lifetime τ_c",
          "field_b_term": "Leeson corner frequency tied to loop bandwidth / delay-line resonator Q",
          "note": "Quality-factor vocabulary crosses domains with careful calibration meaning shifts."
        },
        {
          "field_a_term": "Laser linewidth enhancement factor α² (Henry factor)",
          "field_b_term": "Nonlinear noise conversion / AM–PM coupling in active devices",
          "note": "Both disciplines correct naive linewidth formulas with empirical enhancement phenomenology."
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRev.112.1940",
          "note": "Schawlow & Townes (1958) — infrared and optical masers / linewidth relations"
        },
        {
          "doi": "10.1109/PROC.1966.5219",
          "note": "Leeson (1966) — simple model of oscillator noise spectra (IEEE Proc.)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-electrical-engineering/b-schawlow-townes-linewidth-x-leeson-oscillator-phase-noise.yaml"
    },
    {
      "id": "b-acoustics-room-design",
      "title": "Sabine's reverberation formula (T₆₀ = 0.161V/A, 1900) bridges physical wave acoustics with architectural engineering, enabling quantitative concert hall design through measurable psychoacoustic correlates (IACC, early decay time) of perceived sound quality.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Room acoustics quantifies the interaction between sound waves and architectural geometry. Sabine (1900) measured reverberation time T₆₀ (time for sound to decay 60 dB) in Harvard lecture halls and derived T₆₀ = 0.161·V/A where V is room volume and A = Σ αᵢSᵢ is total absorption (αᵢ = absorption coef",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-room-acoustic-quality-predictable-from-geometry"
      ],
      "communication_gap": "Architectural acoustics sits in engineering schools; wave physics of room resonances is taught in physics departments; perceptual psychoacoustics is in psychology or music departments. Practicing architects rarely have acoustics training; acoustic consultants are hired late in the design process when major geometry is fixed. The academic literature on psychoacoustics (JASA) and the professional literature on concert hall design rarely cite each other.\n",
      "translation_table": [
        {
          "field_a_term": "sound energy decay exp(-t/τ)",
          "field_b_term": "reverberation time T₆₀ = 6.91τ (engineering specification)",
          "note": "Physics gives exponential decay; Sabine parameterized this as the practical T₆₀ specification"
        },
        {
          "field_a_term": "wave equation ∇²p = (1/c²)∂²p/∂t²",
          "field_b_term": "normal modes (room resonances) — frequency-domain room behavior",
          "note": "Eigenfunctions of the wave equation in a rectangular room give the modal structure"
        },
        {
          "field_a_term": "impedance mismatch at boundary",
          "field_b_term": "absorption coefficient α = 1 - |r|² (r = reflection coefficient)",
          "note": "Materials science determines α; physics determines the relationship to impedance"
        },
        {
          "field_a_term": "binaural cross-correlation IACC",
          "field_b_term": "perceived spaciousness / envelopment in concert halls",
          "note": "Beranek's correlation: IACC < 0.5 associated with preferred spaciousness — perceptual physics"
        },
        {
          "field_a_term": "geometric acoustics (ray tracing)",
          "field_b_term": "room acoustic simulation software (ODEON, CATT-Acoustic)",
          "note": "High-frequency limit of wave acoustics; used for architectural design before construction"
        }
      ],
      "references": [
        {
          "doi": "10.2307/25058007",
          "note": "Sabine (1900) — Reverberation, Am Arch 68:3; founding paper of quantitative room acoustics"
        },
        {
          "note": "Beranek (1962) Music, Acoustics and Architecture, Wiley — IACC and concert hall correlations"
        },
        {
          "note": "Kuttruff (2009) Room Acoustics, 5th ed., Spon Press"
        },
        {
          "note": "Cox & D'Antonio (2004) Acoustic Absorbers and Diffusers, Spon Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-engineering/b-acoustics-room-design.yaml"
    },
    {
      "id": "b-chaos-synchronization-pecora-carroll",
      "title": "Chaotic oscillators can be synchronized by unidirectional coupling (Pecora-Carroll synchronization) when the conditional Lyapunov exponents of the driven system are all negative, enabling secure communications, coordinated sensor networks, and biological rhythm entrainment\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Pecora & Carroll (1990) demonstrated that a chaotic drive system (x-subsystem) can force a response system (y-subsystem with identical equations) into identical synchrony x(t) = y(t) when all conditional Lyapunov exponents (CLEs) of the y-subsystem, with x(t) as the driving signal, are negative (λ_m",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-pecora-carroll-synchronization-noise-tolerance-lyapunov"
      ],
      "communication_gap": "Nonlinear physicists study chaos synchronization theoretically while electrical engineers design communications and control systems; the Pecora-Carroll framework is well-known in the nonlinear dynamics community but its practical application in secure communications has been limited by the finding that chaos-based encryption is not cryptographically secure against waveform analysis.\n",
      "translation_table": [
        {
          "field_a_term": "conditional Lyapunov exponents λ_CLE of driven subsystem (physics)",
          "field_b_term": "stability eigenvalues of the response system's linearization along the chaotic attractor (physics)",
          "note": "λ_max^CLE < 0 ensures exponential convergence of response trajectory to drive trajectory; synchronization manifold is stable"
        },
        {
          "field_a_term": "Pecora-Carroll identical synchronization x(t)=y(t) (physics)",
          "field_b_term": "zero-error tracking of drive signal by response system (engineering)",
          "note": "Synchronization is exact for identical systems; generalized synchronization y = Φ(x) holds for non-identical systems"
        },
        {
          "field_a_term": "message masking in chaotic carrier (physics)",
          "field_b_term": "chaos-based spread-spectrum modulation for covert communications (engineering)",
          "note": "Signal s(t) is added to chaotic carrier x(t); receiver synchronizes to x(t) and subtracts it, recovering s(t) — requires λ_max^CLE < 0"
        },
        {
          "field_a_term": "basin of synchronization in coupling parameter space (physics)",
          "field_b_term": "engineering design constraint: minimum coupling strength for synchronization (engineering)",
          "note": "Coupling strength κ must exceed threshold κ_c where λ_max^CLE first becomes negative; engineering design requires κ >> κ_c for robustness"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.64.821",
          "note": "Pecora & Carroll (1990) - synchronization in chaotic systems"
        },
        {
          "doi": "10.1038/35023206",
          "note": "Pecora et al. (2014) - cluster synchronization and isolated desynchronization in complex networks"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-engineering/b-chaos-synchronization-pecora-carroll.yaml"
    },
    {
      "id": "b-compressible-shock-x-traffic-shock-wave",
      "title": "Compressible gas dynamics describes shocks as discontinuities satisfying Rankine–Hugoniot jump conditions across characteristics — Lighthill–Whitham macroscopic traffic models treat vehicle density similarly, yielding kinematic shock waves propagating backward through queues — sharing hyperbolic conservation-law structure despite vastly different constitutive flux-density relations.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Both Euler shocks and LWR traffic shocks arise where characteristics intersect in hyperbolic conservation laws ∂ρ/∂t + ∂q/∂x = 0 with closure q(ρ). Rankine–Hugoniot speeds match observed jam propagation upstream — analogous to shock speeds from mass/momentum jumps in gases though microscopic physics",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-compressible-shock-x-traffic-shock-wave"
      ],
      "communication_gap": "Applied math courses teach gas shocks first; transportation departments teaching LWR often omit explicit naming of Rankine–Hugoniot despite identical algebra — causing missed transfer when students cross-register.\n",
      "translation_table": [
        {
          "field_a_term": "Shock speed from RH jumps (gas dynamics)",
          "field_b_term": "Jam propagation speed for density discontinuities (traffic)",
          "note": "Same weak-solution jump conditions across conservation-law interfaces."
        },
        {
          "field_a_term": "Rarefaction fan expansion waves",
          "field_b_term": "Acceleration waves dispersing congested regions after green-light releases",
          "note": "Mathematical structure parallel though traffic uses concave/convex fundamental diagrams with varied shapes."
        },
        {
          "field_a_term": "Riemann problem initial discontinuity",
          "field_b_term": "Sudden braking front vs open road traffic initial condition",
          "note": "Shared textbook exercise framing for hyperbolic PDE courses cross-listed between departments."
        }
      ],
      "references": [
        {
          "doi": "10.1098/rspa.1955.0068",
          "note": "Lighthill & Whitham (1955) Proc. R. Soc. — kinematic wave theory on roads (traffic shock foundations)"
        },
        {
          "doi": "10.1017/CBO9780511791253",
          "note": "LeVeque — Finite Volume Methods for Hyperbolic Problems (shared conservation-law numerics)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-engineering/b-compressible-shock-x-traffic-shock-wave.yaml"
    },
    {
      "id": "b-johnson-nyquist-equilibrium-fluctuations-x-rf-noise-figure-definition",
      "title": "Johnson–Nyquist voltage fluctuations in resistors at temperature T set the available thermal noise power kT per hertz; RF noise figure F quantifies how much a two-port exceeds that reference — thermodynamic equilibrium noise ↔ linear receiver metrics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A resistor R at absolute temperature T exhibits open-circuit noise voltage spectral density S_v = 4 k T R (Nyquist–Johnson), equivalent to available noise power kT B in bandwidth B at the input of a matched resistive source. The IEEE noise figure F = (S/N)_in / (S/N)_out (with matched input) therefo",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-correlated-port-noise-matrix-lowers-effective-nf-two-port"
      ],
      "communication_gap": "Thermodynamics texts derive Nyquist–Johnson from detailed balance; microwave courses quote NF without always tracing the matched-source convention back to its equilibrium noise identity — confusing when designers mix physical temperature with noise temperature T_e.\n",
      "translation_table": [
        {
          "field_a_term": "Johnson–Nyquist spectral density S_v = 4kTR",
          "field_b_term": "input-referred noise voltage in LNA and mixer budgets",
          "note": "Same kT per hertz that defines NF reference temperature T₀ (often 290 K)."
        },
        {
          "field_a_term": "Nyquist relation linking dissipative conductance and fluctuations",
          "field_b_term": "resistive parts of antenna radiation resistance contributing thermal noise",
          "note": "Classical limit before quantum corrections dominate at mm-wave THz."
        },
        {
          "field_a_term": "equilibrium thermal noise power kTB",
          "field_b_term": "sensitivity floors in cascaded receiver Friis formulas",
          "note": "Cascaded NF algebra assumes additive excess noise above kTB reference."
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRev.32.97",
          "note": "Johnson (1928) — thermal agitation of electricity in conductors (Phys. Rev.)."
        },
        {
          "doi": "10.1103/PhysRev.32.110",
          "note": "Nyquist (1928) — thermal agitation of electric charge in conductors (Phys. Rev.)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-engineering/b-johnson-nyquist-equilibrium-noise-x-rf-noise-figure-definition.yaml"
    },
    {
      "id": "b-microfluidics-lab-on-chip",
      "title": "Microfluidics bridges physics and engineering: low Reynolds number flow, Peclet- dominated diffusion, electroosmosis, dielectrophoresis, and droplet generation enable lab-on-chip systems for single-cell RNA-seq (10x Genomics), CRISPR screening, and point-of-care diagnostics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "At the microscale (channel dimensions L ~ 1-100 ╬╝m), fluid physics is dominated by viscosity: Reynolds number Re = ╧üvL/╬╖ << 1 ΓÇö flow is laminar, deterministic, and fully predictable by Stokes equations. The P├⌐clet number Pe = vL/D >> 1 for macromolecules: advection dominates diffusion along th",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-organ-on-chip-predicts-drug-toxicity-better-than-animal-models"
      ],
      "communication_gap": "Physicists who study Stokes flow, electroosmosis, and dielectrophoresis rarely work in biology or medicine. Biologists who use 10├ù Genomics for single-cell RNA-seq rarely engage with the fluid physics that makes droplet generation work. Clinicians who want point-of-care diagnostics rarely interact with the microfluidics engineers who design the assay formats. The deviceΓåÆclinical translation gap is compounded by regulatory challenges that neither group is trained to navigate.\n",
      "translation_table": [
        {
          "field_a_term": "Re << 1 (Stokes flow regime)",
          "field_b_term": "viscous force dominates inertia ΓåÆ laminar, fully predictable flow",
          "note": "no turbulence; no mixing by chaotic advection; flow is time-reversible (Stokes paradox)"
        },
        {
          "field_a_term": "P├⌐clet number Pe = vL/D",
          "field_b_term": "ratio of advective to diffusive transport; Pe >> 1 ΓåÆ diffusion-limited mixing",
          "note": "small molecules (D ~ 10Γü╗Γü╣ m┬▓/s) diffuse across 10 ╬╝m channel in 0.1 s; proteins slower"
        },
        {
          "field_a_term": "electroosmotic flow (EOF)",
          "field_b_term": "electrostatic pump driven by electric double layer (EDL) interaction with applied field",
          "note": "plug-like velocity profile (unlike Poiseuille parabolic) ΓÇö advantageous for electrophoresis"
        },
        {
          "field_a_term": "dielectrophoresis (DEP) force F_DEP Γê¥ ΓêçE┬▓",
          "field_b_term": "label-free cell sorting by dielectric properties (Re[f_CM])",
          "note": "frequency-tunable ΓÇö cancer cells have different membrane capacitance than normal cells"
        },
        {
          "field_a_term": "droplet microfluidics (W/O emulsions)",
          "field_b_term": "picoliter reaction vessels; Poisson statistics determine cell encapsulation rate",
          "note": "droplet size controlled by flow rates and channel geometry; barcoded beads enable cell identification"
        }
      ],
      "references": [
        {
          "doi": "10.1103/RevModPhys.77.977",
          "note": "Squires & Quake (2005) Microfluidics ΓÇö fluid physics at the nanoliter scale; Rev Mod Phys 77:977"
        },
        {
          "doi": "10.1146/annurev.fluid.36.050802.122124",
          "note": "Stone et al. (2004) Engineering flows in small devices ΓÇö microfluidics toward a lab-on-a-chip; Annu Rev Fluid Mech 36:381"
        },
        {
          "doi": "10.1038/nature01322",
          "note": "Whitesides (2006) The origins and the future of microfluidics; Nature 442:368"
        },
        {
          "doi": "10.1016/j.cell.2015.05.002",
          "note": "Macosko et al. (2015) Highly parallel genome-wide expression profiling of individual cells using nanoliter droplets; Cell 161:1202"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-engineering/b-microfluidics-lab-on-chip.yaml"
    },
    {
      "id": "b-plasma-physics-fusion-energy",
      "title": "Plasma confinement physics — MHD equilibrium, instability theory, and the Lawson criterion — directly determines engineering requirements for fusion reactors: the safety factor q, energy confinement time τ_E, and plasma-facing material constraints are all derivable from first-principles plasma physics and now validated by ITER design and NIF ignition.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Plasma confinement for fusion energy requires solving the magnetohydrodynamic (MHD) equilibrium equation ∇p = J × B, where pressure gradient is balanced by the magnetic force. In a tokamak, this demands nested toroidal magnetic flux surfaces. The safety factor q = rB_φ/(RB_θ), the ratio of toroidal ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-iter-q10-ignition-margin-sufficient-commercial-fusion"
      ],
      "communication_gap": "Plasma physics is taught as a physics discipline (MHD, kinetic theory) while fusion engineering is taught as nuclear engineering. The communities publish in Nuclear Fusion and Physics of Plasmas (physics) vs. Fusion Engineering and Design (engineering), with limited cross-citation. The Lawson criterion is widely known in both communities, but deeper connections — the derivation of engineering tolerances from instability growth rates, the mapping of confinement scaling onto reactor design — are less systematically taught across the divide.\n",
      "translation_table": [
        {
          "field_a_term": "MHD equilibrium ∇p = J × B (plasma physics)",
          "field_b_term": "Pressure balance design specification for coil and blanket geometry",
          "note": "Determines aspect ratio, plasma shape, and plasma-facing component loads"
        },
        {
          "field_a_term": "Safety factor q = rB_φ/RB_θ must be irrational (MHD theory)",
          "field_b_term": "Toroidal field coil current profile programming in tokamak operation",
          "note": "q-profile is actively controlled to avoid rational surfaces"
        },
        {
          "field_a_term": "Lawson criterion nτ_E T > 3×10²¹ m⁻³·s·keV",
          "field_b_term": "Target plasma parameters: n, T, confinement time for reactor design",
          "note": "The fundamental engineering target derived from D-T cross-section physics"
        },
        {
          "field_a_term": "Energy confinement time τ_E (plasma physics scaling)",
          "field_b_term": "Thermal insulation performance metric for the reactor vessel",
          "note": "τ_E ∝ B^0.15 n^0.41 P^{-0.69} R^{1.97} (IPB98y2 H-mode scaling)"
        },
        {
          "field_a_term": "Beta limit β_max (Troyon limit β_N = β/(I/aB))",
          "field_b_term": "Maximum achievable plasma pressure → sets magnetic field requirement",
          "note": "Higher B allows higher absolute pressure for same β_N"
        },
        {
          "field_a_term": "Alpha particle self-heating (born at 3.5 MeV in D-T)",
          "field_b_term": "Ignition condition — plasma heating exceeds all losses without external power",
          "note": "NIF achieved ignition via alpha-heating cascade in 2022"
        }
      ],
      "references": [
        {
          "doi": "10.1088/0370-1301/70/1/303",
          "note": "Lawson (1957) Some criteria for a power producing thermonuclear reactor, Proc Phys Soc B 70:6 — the fundamental nτ_E T criterion\n"
        },
        {
          "note": "Wesson (2011) Tokamaks, 4th ed., Oxford — comprehensive plasma physics and engineering text; derives all instability criteria and engineering requirements\n"
        },
        {
          "doi": "10.1103/PhysRevLett.129.075001",
          "note": "Abu-Shawareb et al. (2022) Lawson criterion for ignition exceeded in an inertial confinement fusion experiment, Phys Rev Lett 129:075001 — NIF ignition result, Q>1 for ICF for first time\n"
        },
        {
          "note": "Freidberg (2007) Plasma Physics and Fusion Energy, Cambridge — bridges plasma physics theory to engineering design; covers MHD equilibrium, stability, and Lawson criterion\n"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-engineering/b-plasma-physics-fusion-energy.yaml"
    },
    {
      "id": "b-quantum-limited-amplification-x-heisenberg-noise-figure-bound",
      "title": "Phase-preserving amplifiers add quantum noise bounded by Heisenberg uncertainty — when expressed as excess over classical Johnson noise at the input, this yields a fundamental noise figure floor near 3 dB at high gain for conventional quadrature devices (quantum optics ↔ microwave engineering).\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Caves derived that a linear phase-preserving amplifier with large gain must introduce noise equivalent to at least half a quantum at the input port when referenced against the signal quadrature, translating into an intrinsic noise figure NF ≥ 3 dB for large gain under standard definitions. This link",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-josephson-paramp-nears-quantum-noise-floor-with-rimp-matched-array"
      ],
      "communication_gap": "Microwave datasheets quote NF without mentioning quantum limits; quantum optics literature seldom converts squeezing metrics into IEEE NF unless targeting radio astronomy instrumentation collaborations.\n",
      "translation_table": [
        {
          "field_a_term": "quadrature uncertainty relation ΔX ΔP ≥ ℏ/2",
          "field_b_term": "minimum added noise number N ≥ ½ for phase-preserving linear amplification (large gain)",
          "note": "Standard Haus–Caves picture for bosonic modes."
        },
        {
          "field_a_term": "noise figure referenced to thermal noise temperature T₀",
          "field_b_term": "quantum noise temperature contributions from zero-point fluctuations at GHz in cryogenic LNAs",
          "note": "At room temperature mm-wave, classical Johnson noise can dominate but quantum floor matters in cryo receivers."
        },
        {
          "field_a_term": "squeezing / phase-sensitive amplification",
          "field_b_term": "sub–3 dB effective NF strategies when quadratures become asymmetric by design",
          "note": "Does not violate uncertainty; redistributes noise budgets."
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevD.26.1817",
          "note": "Caves (1982) — quantum limits on noise in linear amplifiers (Phys. Rev. D)."
        },
        {
          "doi": "10.1103/PhysRev.128.2407",
          "note": "Haus & Mullen (1962) — quantum noise in linear amplifiers (Phys. Rev.)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-engineering/b-quantum-limited-amplification-x-heisenberg-noise-figure-bound.yaml"
    },
    {
      "id": "b-quantum-sensing-fundamental-limits",
      "title": "Quantum metrology achieves Heisenberg-limited sensitivity — quantum sensors beat classical noise floors by exploiting entanglement and squeezing",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Heisenberg uncertainty principle ΔxΔp ≥ ℏ/2 sets a fundamental sensitivity limit for all measurements. Classical sensors are limited by shot noise (standard quantum limit, SQL): sensitivity scales as 1/√N for N independent measurements. Quantum sensors using entangled or squeezed states achieve ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-heisenberg-limited-sensing-biological"
      ],
      "communication_gap": "Quantum optics, atomic physics, and gravitational wave astronomy each developed quantum sensing techniques independently. The unifying theoretical framework of quantum Fisher information and quantum Cramér-Rao bounds was formalized in the 2000s but is not standard training in any single engineering discipline.\n",
      "translation_table": [
        {
          "field_a_term": "Heisenberg uncertainty principle ΔxΔp ≥ ℏ/2",
          "field_b_term": "fundamental noise floor of any physical sensor",
          "note": "Sets the standard quantum limit (SQL) that classical sensors asymptotically approach"
        },
        {
          "field_a_term": "squeezed light (reduced phase noise, enhanced amplitude noise)",
          "field_b_term": "redistributing sensor noise from measured to unmeasured quadrature",
          "note": "LIGO uses squeezed vacuum injection to beat shot noise at high frequencies"
        },
        {
          "field_a_term": "N-particle entangled GHZ state",
          "field_b_term": "Heisenberg-limited sensor with 1/N phase sensitivity",
          "note": "N-fold improvement over classical 1/sqrt(N) scaling"
        },
        {
          "field_a_term": "quantum Fisher information F_Q",
          "field_b_term": "maximum achievable precision (quantum Cramér-Rao bound 1/sqrt(F_Q))",
          "note": "F_Q = N for separable states; F_Q = N^2 for maximally entangled GHZ states"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevD.23.1693",
          "note": "Caves, C.M. (1981). Quantum-mechanical noise in an interferometer. Phys Rev D 23:1693."
        },
        {
          "doi": "10.1103/PhysRevLett.116.061102",
          "note": "Abbott et al. [LIGO] (2016). Observation of gravitational waves. Phys Rev Lett 116:061102."
        },
        {
          "doi": "10.1103/RevModPhys.89.035002",
          "note": "Degen et al. (2017). Quantum sensing. Rev Mod Phys 89:035002."
        },
        {
          "doi": "10.1126/science.1104149",
          "note": "Giovannetti, Lloyd & Maccone (2004). Quantum-enhanced measurements. Science 306:1330."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-engineering/b-quantum-sensing-fundamental-limits.yaml"
    },
    {
      "id": "b-semiconductor-lasers-photonics",
      "title": "Einstein's stimulated emission (1917) and the semiconductor p-n junction (double heterostructure, Kroemer Nobel 2000) bridge quantum optics physics to photonics engineering — enabling laser diodes, VCSELs, and DFB lasers for fiber optic communications and photonic integrated circuits on silicon.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Einstein's 1917 derivation of stimulated emission established that population inversion (N₂ > N₁) produces optical gain g(ν) = σ(ν)(N₂−N₁), where σ is the stimulated emission cross-section. The Fabry-Pérot cavity condition 2nL = mλ selects longitudinal modes with spacing Δν = c/(2nL). The semiconduc",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-silicon-photonics-dfb-laser-integration"
      ],
      "communication_gap": "Quantum optics theorists (who work on cavity QED, photon statistics, laser rate equations) and photonics engineers (who design PICs, DFB lasers, WDM systems) publish in different journals (Physical Review Letters vs. Journal of Lightwave Technology, Optics Express) and attend different conferences (CLEO Physics vs. CLEO Applications). The theoretical tools are shared but have developed independently. The silicon photonics revolution was driven by engineers who rediscovered photonic crystal theory from condensed matter physics with minimal cross-community contact.\n",
      "translation_table": [
        {
          "field_a_term": "population inversion (quantum optics)",
          "field_b_term": "carrier injection above threshold in laser diode",
          "note": "Electrical pumping of the p-n junction creates population inversion in the active layer"
        },
        {
          "field_a_term": "stimulated emission cross-section σ(ν)",
          "field_b_term": "modal gain g(ν) in semiconductor active layer",
          "note": "Gain spectrum in semiconductors reflects the joint density of states and occupation"
        },
        {
          "field_a_term": "Fabry-Pérot resonator (2nL = mλ)",
          "field_b_term": "laser cavity (cleaved facets or distributed Bragg reflectors)",
          "note": "Cleaved semiconductor facets act as partial mirrors (R ≈ 30%) to form the cavity"
        },
        {
          "field_a_term": "confinement factor Γ",
          "field_b_term": "double heterostructure waveguiding (Kroemer design)",
          "note": "Γ is the fraction of modal power overlapping the gain region — maximized by heterostructure design"
        },
        {
          "field_a_term": "Bragg grating (distributed feedback)",
          "field_b_term": "DFB laser single-mode selection",
          "note": "DFB grating selects the Bragg wavelength, suppressing all side modes for WDM applications"
        },
        {
          "field_a_term": "photonic bandgap (periodic dielectric structure)",
          "field_b_term": "photonic crystal fiber and on-chip photonic crystal laser",
          "note": "Photonic crystals translate semiconductor bandgap physics to photon confinement"
        }
      ],
      "references": [
        {
          "note": "Einstein (1917) — On the quantum theory of radiation (stimulated emission)",
          "doi": "10.1002/andp.19173270110"
        },
        {
          "note": "Kroemer (1963) — A proposed class of heterojunction injection lasers (Nobel 2000)",
          "doi": "10.1109/PROC.1963.2706"
        },
        {
          "note": "Hall et al. (1962) — Coherent light emission from GaAs junctions",
          "doi": "10.1103/PhysRevLett.9.366"
        },
        {
          "note": "Saleh & Teich (2007) — Fundamentals of Photonics (textbook)",
          "url": "https://www.wiley.com/en-us/Fundamentals+of+Photonics%2C+3rd+Edition-p-9781119506874"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-engineering/b-semiconductor-lasers-photonics.yaml"
    },
    {
      "id": "b-shockley-queisser-thermodynamic-limit",
      "title": "The Shockley-Queisser (SQ) efficiency limit of ~33% for single-junction solar cells is a consequence of the second law of thermodynamics applied to photon statistics: the Carnot-like bound arising from treating the sun as a blackbody at T_sun = 5778 K limits radiative recombination losses, and no single-bandgap cell can exceed η_SQ regardless of material choice.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Shockley & Queisser (1961) derived the efficiency limit using detailed balance: a solar cell in equilibrium emits and absorbs photons; the maximum voltage is set by quasi-Fermi level splitting ΔE_F = qV_oc, which cannot exceed the photon chemical potential μ = hν − TS_photon. Henry (1980) reframed t",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-tandem-cell-thermodynamic-optimum-bandgap-pairing"
      ],
      "communication_gap": "Solar cell engineers use the SQ limit as a benchmark without always framing it as a thermodynamic bound analogous to Carnot's theorem. Thermodynamicists studying radiation have not always engaged with photovoltaic engineering details. Henry's 1980 thermodynamic rederivation of the SQ limit is underappreciated in the photovoltaics literature compared to the original Shockley & Queisser paper.\n",
      "translation_table": [
        {
          "field_a_term": "Bandgap energy E_g of the semiconductor absorber",
          "field_b_term": "Energy threshold for photon absorption (Heaviside filter)",
          "note": "Photons with hν < E_g are transmitted (sub-bandgap loss); hν > E_g thermalise to E_g"
        },
        {
          "field_a_term": "Open-circuit voltage V_oc (maximum voltage)",
          "field_b_term": "Photon chemical potential / quasi-Fermi level splitting",
          "note": "V_oc is set by the balance between photon absorption and radiative recombination emission"
        },
        {
          "field_a_term": "SQ efficiency limit 33% at E_g = 1.34 eV",
          "field_b_term": "Maximum work from a two-temperature heat engine with photon reservoir",
          "note": "SQ = Carnot × (absorption efficiency) × (quantum efficiency); thermalisation reduces it to 33%"
        },
        {
          "field_a_term": "Multi-junction tandem cell (N bandgaps)",
          "field_b_term": "Multi-stage heat engine (each stage harvests a photon energy band)",
          "note": "N junctions reduce thermalisation loss; N→∞ approaches thermodynamic limit of ~68% at 1 sun"
        }
      ],
      "references": [
        {
          "doi": "10.1063/1.1736034",
          "note": "Shockley & Queisser (1961) J. Appl. Phys. – detailed balance limit of efficiency of p-n junction solar cells"
        },
        {
          "doi": "10.1063/1.328272",
          "note": "Henry (1980) J. Appl. Phys. – limiting efficiencies of ideal single and multiple energy gap terrestrial solar cells"
        },
        {
          "doi": "10.1038/nmat4388",
          "note": "Polman & Atwater (2016) – photonic design principles for ultrahigh-efficiency photovoltaics beyond SQ"
        },
        {
          "doi": "10.1021/acs.jpclett.6b01060",
          "note": "Geisz et al. – six-junction solar cell; 47.1% efficiency under concentrated sunlight"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-engineering/b-shockley-queisser-thermodynamic-limit.yaml"
    },
    {
      "id": "b-thermoacoustics-heat-engines",
      "title": "Acoustic pressure oscillations in gas-filled tubes can sustain heat engine and refrigeration cycles with no moving parts, achieving Carnot efficiency in the ideal limit — the thermoacoustic effect bridges acoustic wave physics with classical thermodynamics and has produced practical heat engines with >30% Carnot efficiency.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The thermoacoustic effect (discovered by Sondhauss 1850, theoretically explained by Kirchhoff 1868): when an acoustic standing wave establishes a steep temperature gradient along a solid surface (stack), the oscillating gas parcels undergo an approximate thermodynamic cycle. A gas parcel moves towar",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-thermoacoustic-travelling-wave-carnot-approach"
      ],
      "communication_gap": "Thermoacoustics straddles acoustics (ASA journals), heat transfer (ASME journals), and thermodynamics (physics journals) — communities that rarely read each other's work. Rott (1969-1980) developed the linear thermoacoustic theory in a series of Advances in Applied Mechanics papers that were technically demanding and rarely cited in engineering practice. Swift's work at LANL from 1988 onward translated this into accessible engineering design equations, but thermoacoustics remains outside standard engineering curricula. Combustion engineers studying thermoacoustic instabilities and cryogenic engineers building thermoacoustic refrigerators are literally in the same physical phenomenon from opposite engineering perspectives and rarely attend the same conferences.\n",
      "translation_table": [
        {
          "field_a_term": "acoustic pressure amplitude p̃₁ at stack",
          "field_b_term": "pressure variation in thermodynamic cycle (piston compression)"
        },
        {
          "field_a_term": "acoustic velocity amplitude ũ₁ (displacement oscillation)",
          "field_b_term": "piston stroke in Stirling engine"
        },
        {
          "field_a_term": "phase angle φ between p̃ and ũ",
          "field_b_term": "phasing of piston expansion and heat exchange strokes"
        },
        {
          "field_a_term": "stack temperature gradient dT₀/dx",
          "field_b_term": "temperature difference between hot and cold reservoirs"
        },
        {
          "field_a_term": "regenerator (porous material storing and releasing heat)",
          "field_b_term": "regenerator in Stirling engine (thermal storage element)"
        },
        {
          "field_a_term": "thermoacoustic heat flux Q̇₂",
          "field_b_term": "heat rejection rate of thermodynamic cycle"
        },
        {
          "field_a_term": "Rayleigh criterion (pressure-heat release in phase → instability)",
          "field_b_term": "positive feedback in active acoustic amplifier"
        }
      ],
      "references": [
        {
          "doi": "10.1121/1.396617",
          "note": "Swift (1988) J Acoust Soc Am 84:1145 — thermoacoustic engines review"
        },
        {
          "note": "Rott (1980) Adv Appl Mech 20:135 — thermoacoustic oscillation theory"
        },
        {
          "doi": "10.1038/21398",
          "note": "Backhaus & Swift (1999) Nature 399:335 — thermoacoustic Stirling engine 30% Carnot"
        },
        {
          "note": "Swift (2002) Thermoacoustics: A Unifying Perspective. ASA Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-engineering/b-thermoacoustics-heat-engines.yaml"
    },
    {
      "id": "b-thermodynamics-computing-energy",
      "title": "Thermodynamics of Computing and Energy Limits — Landauer's principle, reversible logic, neuromorphic architectures, and the brain's energy efficiency define fundamental and practical computing bounds",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Landauer's principle (1961) establishes that logically irreversible operations — those that erase information — must dissipate at least k_BT ln 2 ≈ 3×10⁻²¹ J per bit at room temperature into the environment, converting logical irreversibility into thermodynamic entropy production. This is the fundam",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Thermodynamicists and information theorists who study Landauer's principle work largely in academic physics; chip architects and VLSI engineers designing real CPUs rarely engage with the fundamental physics literature. The neuromorphic computing community (IBM, Intel, Qualcomm) operates at the intersection but faces a commercial valley-of-death: neuromorphic chips offer energy advantages only for specific sparse workloads, while general-purpose AI training (dense matrix multiplication) remains dominated by GPU/TPU architectures.\n",
      "translation_table": [
        {
          "field_a_term": "Landauer limit k_BT ln 2 per bit erased",
          "field_b_term": "minimum power dissipation floor for irreversible logic",
          "note": "At 300 K: 3×10⁻²¹ J; at 1 GHz clock rate: 3×10⁻¹² W — vanishingly small but non-zero"
        },
        {
          "field_a_term": "logically reversible computation (Toffoli/Fredkin gates)",
          "field_b_term": "zero-dissipation computing in principle; billiard-ball model",
          "note": "Reversible gates preserve all input information in outputs; no bit erasure → no mandatory dissipation"
        },
        {
          "field_a_term": "CMOS dynamic power P = αCV²f",
          "field_b_term": "switching energy per transistor per clock cycle",
          "note": "Dominant energy cost in real CPUs; scales as V²; voltage scaling is the main lever but threshold voltage limits scaling"
        },
        {
          "field_a_term": "leaky integrate-and-fire (LIF) neuron model",
          "field_b_term": "event-driven neuromorphic computation unit",
          "note": "Spikes are sparse (1–10% neurons active per timestep) → 10–100× energy savings vs. dense matrix multiplication"
        },
        {
          "field_a_term": "brain energy budget 20 W for 86 billion neurons",
          "field_b_term": "0.2 fJ per synaptic operation (empirical upper bound)",
          "note": "Each synapse fires ~1 Hz; 10¹⁴ synapses × 1 Hz × E_syn = 20 W → E_syn ≈ 0.2 fJ; still 100× Landauer"
        },
        {
          "field_a_term": "adiabatic charging of capacitor from resonant source",
          "field_b_term": "energy recovery in low-frequency reversible logic circuits",
          "note": "CV²/2 per cycle reduced to CV²/(4N) for N-stage adiabatic charging; practical at audio frequencies only"
        }
      ],
      "references": [
        {
          "doi": "10.1147/rd.53.0183",
          "note": "Landauer (1961) IBM J Res Dev 5:183 — irreversibility and heat generation in computing"
        },
        {
          "doi": "10.1147/rd.173.0525",
          "note": "Bennett (1973) IBM J Res Dev 17:525 — logical reversibility of computation"
        },
        {
          "doi": "10.1126/science.1254642",
          "note": "Merolla et al. (2014) Science 345:668 — TrueNorth neuromorphic chip"
        },
        {
          "doi": "10.1109/MM.2018.112130359",
          "note": "Davies et al. (2018) IEEE Micro 38:82 — Loihi neuromorphic processor"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-engineering/b-thermodynamics-computing-energy.yaml"
    },
    {
      "id": "b-kuramoto-synchronization",
      "title": "The Kuramoto model of coupled phase oscillators is a single mathematical framework that simultaneously describes neural gamma-band synchronization, cardiac pacemaker coupling, power-grid frequency stability, and laser array coherence — four fields with almost no cross-disciplinary communication despite sharing identical governing equations.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The Kuramoto model (1975) describes a population of N coupled phase oscillators:\n  d(theta_i)/dt = omega_i + (K/N) * sum_j sin(theta_j - theta_i)\nwhere omega_i are natural frequencies (drawn from a distribution g(omega)) and K is coupling strength. At critical coupling K_c = 2 / (pi * g(0)), a phase",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-cardiac-arrhythmia-phase-transition",
        "h-criticality-conscious-integration"
      ],
      "communication_gap": "The Kuramoto model appears in Physics Reviews, Journal of Neuroscience, Circulation, and IEEE Transactions on Power Systems — four journals whose editorial boards have essentially zero overlap. Cardiologists who study AF triggers use the term \"rotor\" (a spatial pattern of desynchronized spiral waves) without awareness that it maps to a Kuramoto defect. Power engineers who study frequency cascades use \"swing equation\" without awareness that it is a Kuramoto model on a network. The shared framework would allow each field to read the other's 50-year literature of analytical results.\n",
      "translation_table": [
        {
          "field_a_term": "Kuramoto coupling constant K",
          "field_b_term": "neural — GABAergic synapse conductance; cardiac — gap junction conductance; engineering — transmission line susceptance"
        },
        {
          "field_a_term": "critical coupling K_c = 2/(pi*g(0))",
          "field_b_term": "neural — gamma oscillation onset threshold; cardiac — arrhythmia vulnerability; engineering — grid stability margin"
        },
        {
          "field_a_term": "order parameter r (mean-field coherence)",
          "field_b_term": "neural — LFP gamma power; cardiac — atrial synchrony index; engineering — grid frequency coherence"
        },
        {
          "field_a_term": "natural frequency distribution g(omega)",
          "field_b_term": "neural — intrinsic firing rate distribution of interneurons; cardiac — cell-to-cell calcium oscillation period variance"
        },
        {
          "field_a_term": "phase transition at K_c (second-order, mean-field)",
          "field_b_term": "all systems — abrupt onset of synchrony; identified independently as \"critical transition\" in each field"
        },
        {
          "field_a_term": "finite-size fluctuations of r near K_c",
          "field_b_term": "neural — trial-to-trial variability near gamma onset; cardiac — paroxysmal AF triggered by noise; engineering — grid frequency noise near stability margin"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BFb0013365",
          "note": "Kuramoto (1975) — original model; second-order synchronization transition"
        },
        {
          "doi": "10.1103/RevModPhys.77.137",
          "note": "Acebron et al. (2005) — comprehensive review of Kuramoto model across fields"
        },
        {
          "doi": "10.1016/j.neuron.2009.08.019",
          "note": "Fries (2009) — neural gamma oscillations and interneuron synchrony; implicit Kuramoto dynamics"
        },
        {
          "doi": "10.1161/CIRCRESAHA.116.308401",
          "note": "Nattel (2002) — AF mechanisms; rotor dynamics as Kuramoto desynchronization"
        },
        {
          "doi": "10.1109/TPWRS.2012.2196691",
          "note": "Dorfler & Bullo (2012) — explicit Kuramoto framework for power grid synchronization"
        },
        {
          "arxiv": "2003.07408",
          "note": "Rodrigues et al. (2016) — Kuramoto model on complex networks; spectral gap formula for K_c"
        }
      ],
      "last_reviewed": "2026-05-04",
      "file": "cross-domain/physics-engineering-neuroscience/b-kuramoto-synchronization.yaml"
    },
    {
      "id": "b-percolation-epidemiology",
      "title": "Network percolation theory and epidemic threshold theory are the same mathematical object — the epidemic threshold R_0=1 is a percolation phase transition, and importing finite-size scaling from condensed-matter physics would transform how outbreak risk is estimated in finite populations.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "In bond percolation on a network, a giant connected component emerges at a critical bond probability p_c — below p_c the outbreak is finite; above it a macroscopic fraction of nodes is infected. The epidemic threshold R_0=1 (basic reproduction number equals 1) is *exactly* this percolation threshold",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-percolation-outbreak-threshold",
        "h-adaptive-therapy-percolation-threshold"
      ],
      "communication_gap": "The mathematical equivalence of SIR thresholds and percolation thresholds is established in the network science literature (Newman 2002, Kenah-Robins 2007, Miller 2010). However, the epidemiology literature overwhelmingly uses deterministic ODE models (SIR/SEIR) that are mean-field and infinite-population. The finite-size scaling tools are developed in condensed-matter physics and statistical mechanics journals that epidemiologists do not read. During COVID-19, finite-size effects in small institutional outbreaks (hospitals, nursing homes, schools) were handled with ad hoc simulation rather than systematic FSS — a direct consequence of this communication gap.\n",
      "translation_table": [
        {
          "field_a_term": "bond percolation threshold p_c",
          "field_b_term": "epidemic threshold R_0 = 1 (herd immunity boundary)"
        },
        {
          "field_a_term": "giant connected component",
          "field_b_term": "epidemic — macroscopic fraction of population infected"
        },
        {
          "field_a_term": "finite cluster below p_c",
          "field_b_term": "self-limiting outbreak (index case dies out without major spread)"
        },
        {
          "field_a_term": "finite-size scaling (FSS) correction to p_c in N-node network",
          "field_b_term": "correction to R_0 threshold in finite population of size N"
        },
        {
          "field_a_term": "percolation correlation length xi",
          "field_b_term": "spatial/network scale over which infections correlate (cluster size distribution)"
        },
        {
          "field_a_term": "order parameter (giant component fraction P_inf)",
          "field_b_term": "final attack rate (fraction of population ultimately infected)"
        },
        {
          "field_a_term": "scaling function near p_c (P_inf ~ |p-p_c|^beta)",
          "field_b_term": "epidemic size as function of R_0 near threshold"
        },
        {
          "field_a_term": "universality class (mean-field percolation for random graphs, 2D percolation for spatial grids)",
          "field_b_term": "contact network topology class (random mixing vs spatially structured population)"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevE.66.016128",
          "note": "Newman (2002) — proved SIR epidemic threshold = bond percolation threshold on random graphs"
        },
        {
          "doi": "10.1093/aje/kwm120",
          "note": "Kenah & Robins (2007) — epidemic as percolation on the transmission network"
        },
        {
          "doi": "10.1103/PhysRevE.76.010101",
          "note": "Miller (2007) — epidemic size and final size as percolation order parameter"
        },
        {
          "arxiv": "cond-mat/9212004",
          "note": "Finite-size scaling in percolation — the FSS toolkit that epidemiology has not imported"
        },
        {
          "doi": "10.1038/nature04153",
          "note": "Newman (2006) — modularity and community structure in networks; relevant to spatial epidemic structure"
        }
      ],
      "last_reviewed": "2026-05-04",
      "file": "cross-domain/physics-epidemiology/b-percolation-epidemiology.yaml"
    },
    {
      "id": "b-minority-game-economics",
      "title": "Minority game (El Farol bar problem) ↔ market microstructure ↔ quasispecies evolution",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Arthur (1994) posed the El Farol Bar problem: 100 agents decide weekly whether to attend a bar; those in the minority (fewer than 60 attend) have fun, those in the majority do not. No single strategy is self-consistent if all use it. Challet & Zhang (1997) formalized this as the Minority Game (MG): ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-market-crash-turbulent-transition",
        "h-minority-game-quasispecies-duality"
      ],
      "communication_gap": "Arthur published in the American Economic Review (1994). Challet & Zhang published in Physica A (1997). The financial market connection was made by Farmer (1999) and Li et al. (2000). The quasispecies connection was made by Manrubia & Zanette (2002) in Europhysics Letters. None of these papers cross-cite the others systematically, and the unified framework (phase transitions in competitive information systems) has no single review paper accessible to economists.\n",
      "translation_table": [
        {
          "field_a_term": "minority group wins (MG)",
          "field_b_term": "price-setting by the minority in financial markets",
          "note": "Sellers in minority set price up; buyers in minority set price down"
        },
        {
          "field_a_term": "alpha = P/N phase transition",
          "field_b_term": "onset of market efficiency / alpha_c = information capacity per agent",
          "note": "alpha_c ~ 0.34 in canonical MG; matches empirical estimates"
        },
        {
          "field_a_term": "strategy pool crowding",
          "field_b_term": "correlated investor strategies (momentum vs contrarian)",
          "note": "Too many investors using identical strategies destroy their edge"
        },
        {
          "field_a_term": "MG disordered phase",
          "field_b_term": "quasispecies above error threshold (viable evolving population)",
          "note": "Both: disordered but functioning; information is present but noisy"
        },
        {
          "field_a_term": "MG ordered phase",
          "field_b_term": "error catastrophe / market microstructure breakdown",
          "note": "Convergence on a single strategy; predictable but self-defeating"
        }
      ],
      "references": [
        {
          "doi": "10.2307/2118432",
          "note": "Arthur (1994) AER - El Farol Bar problem original statement"
        },
        {
          "doi": "10.1016/S0378-4371(97)00419-6",
          "note": "Challet & Zhang (1997) Physica A - original Minority Game"
        },
        {
          "doi": "10.1103/PhysRevLett.84.1550",
          "note": "Savit et al. (1999) PRL - phase transition in Minority Game"
        },
        {
          "doi": "10.1103/PhysRevE.65.026126",
          "note": "Manrubia & Zanette (2002) PRE - quasispecies connection to MG"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/physics-finance/b-minority-game-economics.yaml"
    },
    {
      "id": "b-openalex-stat-mech-agency-costs",
      "title": "The principal-agent problem in corporate finance maps onto a statistical mechanics system where agency costs are the free energy of misaligned incentive configurations, and optimal contracting is equivalent to finding the minimum free energy state of a coupled spin system with heterogeneous local fields.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Jensen and Meckling (1976, 70 k citations) showed that agency costs — the welfare loss from separating ownership and control — arise from information asymmetry and divergent incentive structures between principals and agents. Statistical mechanics offers a quantitative framework for this: treating e",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-firm-equilibrium-stat-mech-analogy"
      ],
      "communication_gap": "Financial economists rarely read Journal of Statistical Physics; condensed-matter physicists rarely read Journal of Finance. The equilibrium-contract literature uses utility maximisation language; statistical mechanics uses partition functions. Econophysicists have explored some connections but without engaging the principal-agent literature directly. Grant panels for finance do not include physicists.\n",
      "translation_table": [
        {
          "field_a_term": "spin configuration {s_i}",
          "field_b_term": "vector of agent effort choices {e_i}",
          "note": "Each agent chooses an effort level; the aggregate configuration determines firm output"
        },
        {
          "field_a_term": "local field h_i",
          "field_b_term": "marginal incentive from compensation contract for agent i",
          "note": "Equity grants, bonuses, and monitoring intensity set the effective field"
        },
        {
          "field_a_term": "free energy F",
          "field_b_term": "total agency cost A(F, I) from Jensen-Meckling decomposition",
          "note": "Residual loss + bonding cost + monitoring cost = free energy gap from first-best"
        },
        {
          "field_a_term": "temperature T (disorder)",
          "field_b_term": "information asymmetry (measurement noise, hidden action)",
          "note": "Higher information asymmetry prevents the principal from distinguishing effort signals"
        },
        {
          "field_a_term": "ground state (T=0)",
          "field_b_term": "first-best contract (perfect information, zero agency cost)",
          "note": "The idealized minimum free energy state achievable only at zero temperature"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0304-405x(76)90026-x",
          "note": "Jensen & Meckling (1976) Theory of the Firm — 70,036 citations; primary reference"
        },
        {
          "doi": "10.1023/A:1009791806820",
          "note": "Bouchaud & Mezard (2000) — wealth condensation in a simple model of the economy"
        },
        {
          "doi": "10.1103/PhysRevLett.116.128701",
          "note": "Statistical mechanics of economic networks and inequality"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-finance/b-openalex-stat-mech-agency-costs.yaml"
    },
    {
      "id": "b-spin-glass-replica-x-factor-covariance-clustering-finance",
      "title": "Replica symmetry breaking in mean-field spin glasses describes hierarchical clustering of pure states in coupling disorder — a geometric picture loosely echoed when eigenstructure cleaning of financial covariance matrices exposes nested factor structure, **with heavy caveats**: empirical correlations are non-stationary, non-Gaussian, and far from thermodynamic limits used in Parisi theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Random-matrix bulk/outlier separation (Marchenko–Pastur) already rationalizes noise eigenvalues in sample covariance matrices (see established USDR bridges). Spin-glass replica narratives add an **interpretive layer**: ultrametric overlaps among replica pure states resemble hierarchical clustering t",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-replica-sparsity-predicts-factor-eigenvalue-noise-bulk"
      ],
      "communication_gap": "Mathematical finance adoption of RMT cleaning advanced via physicists (Laloux, Plerou, Potters) while spin-glass RSB intuition remains niche — practitioners risk confusing qualitative clustering metaphors with empirical covariance estimators unless uncertainty quantification is explicit.\n",
      "translation_table": [
        {
          "field_a_term": "Parisi overlap distribution / RSB hierarchy",
          "field_b_term": "hierarchical factor models / nested industrial clusters",
          "note": "Metaphorical alignment — not an established isomorphism."
        },
        {
          "field_a_term": "disorder realization (exchange randomness)",
          "field_b_term": "cross-sectional draw of asset returns in a window",
          "note": "Both introduce quenched randomness; stationarity assumptions differ sharply."
        },
        {
          "field_a_term": "replica trick computing disorder-averaged free energy",
          "field_b_term": "bootstrap / subsampling across time windows for covariance stability",
          "note": "Methodological analogy only — different objectives and estimators."
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.43.1754",
          "note": "Parisi (1979) — infinite number of order parameters for spin glasses; Phys Rev Lett."
        },
        {
          "doi": "10.1103/PhysRevLett.83.1467",
          "note": "Laloux et al. (1999) — noise dressing of financial correlation matrices; Phys Rev Lett."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-finance/b-spin-glass-replica-x-factor-covariance-clustering-finance.yaml"
    },
    {
      "id": "b-turbulence-financial-markets",
      "title": "Kolmogorov turbulence cascade ↔ multifractal volatility in financial markets",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Kolmogorov (1941) derived that in fully developed turbulence, energy cascades from large eddies to small ones with a universal power-law energy spectrum E(k) ~ k^{-5/3}, and velocity increments delta_v(r) ~ r^{1/3} (the Kolmogorov scaling). Mandelbrot (1963) observed that financial return increments",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-market-crash-turbulent-transition"
      ],
      "communication_gap": "Mandelbrot noticed the turbulence-finance analogy in the 1960s but it was treated as a metaphor by mainstream finance (which adopted Gaussian Brownian motion instead). Econophysics developed the quantitative multifractal models in the 1990s-2000s in physics journals (Physica A, Physical Review E) largely unread by financial economists. The 2008 crisis renewed interest but the communities still publish in separate journals with minimal cross-citation.\n",
      "translation_table": [
        {
          "field_a_term": "energy cascade (large eddies → small eddies)",
          "field_b_term": "volatility cascade (long-term trends → short-term fluctuations)",
          "note": "Richardson cascade in turbulence; no analogue identified in market microstructure yet"
        },
        {
          "field_a_term": "Kolmogorov -5/3 energy spectrum",
          "field_b_term": "power-law autocorrelation of squared returns (ARCH)",
          "note": "Both arise from multiplicative cascade with log-normal increments"
        },
        {
          "field_a_term": "velocity increment delta_v(r) at scale r",
          "field_b_term": "return r(tau) over horizon tau",
          "note": "Anomalous scaling of moments in both"
        },
        {
          "field_a_term": "intermittency / multifractal spectrum f(alpha)",
          "field_b_term": "heteroskedasticity / volatility clustering",
          "note": "Both depart from simple monofractal (H = 1/3 or H = 1/2)"
        },
        {
          "field_a_term": "integral scale L (energy injection scale)",
          "field_b_term": "cascade timescale T ~ 1.5 years (S&P 500)",
          "note": "Where the cascade begins; identified empirically"
        },
        {
          "field_a_term": "Kolmogorov dissipation scale eta",
          "field_b_term": "market microstructure noise scale (tick data)",
          "note": "Where the cascade ends"
        },
        {
          "field_a_term": "Reynolds number Re (turbulence intensity)",
          "field_b_term": "market depth / liquidity (no formal correspondence yet)",
          "note": "Analogy incomplete — open unknown"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF00538711",
          "note": "Kolmogorov (1941) — -5/3 turbulence spectrum"
        },
        {
          "doi": "10.1017/CBO9780511817311",
          "note": "Frisch (1995) Turbulence — definitive text on the cascade"
        },
        {
          "doi": "10.1023/A:1003003012316",
          "note": "Mandelbrot, Fisher & Calvet (1997) — MMAR multifractal asset returns"
        },
        {
          "doi": "10.1088/1469-7688/1/2/306",
          "note": "Bacry, Delour & Muzy (2001) — Multifractal Random Walk"
        },
        {
          "doi": "10.1103/PhysRevLett.73.845",
          "note": "Ghashghaie et al. (1996) PRL — turbulent cascades in FX markets"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/physics-finance/b-turbulence-financial-markets.yaml"
    },
    {
      "id": "b-cherenkov-radiation-x-mach-sonic-cone",
      "title": "Cherenkov light arises when a charged particle moves faster than the phase velocity of light in a medium — acoustic Mach cones and sonic booms arise when a source moves faster than the small-amplitude wave speed — both are cone-shaped envelopes of emitted wavefront interference tied to superluminal/super-acoustic motion relative to a linear dispersion relation.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "In optics the Cherenkov angle satisfies cos θ_C = c/(nv); in acoustics the Mach angle satisfies sin μ = c_s/v for steady supersonic motion in ideal fluids — both formulas locate a conical caustic where constructive interference concentrates emitted wave energy — pedagogically the same Huygens-constr",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-cherenkov-mach-prerequisite-transfer-diagnostic"
      ],
      "communication_gap": "Undergraduate electromagnetism introduces Cherenkov detectors without citing classical gas-dynamics Mach cones; aeroacoustics courses rarely mention Cherenkov despite identical cone-angle algebra — shared geometric optics vocabulary could shorten teaching transfer time.\n",
      "translation_table": [
        {
          "field_a_term": "Phase velocity limit in medium (Cherenkov threshold)",
          "field_b_term": "Sound speed limit (sonic boom threshold)",
          "note": "Both compare source speed to linear wave speed in the ambient medium."
        },
        {
          "field_a_term": "Cherenkov cone half-angle θ_C",
          "field_b_term": "Mach cone angle μ or sin μ = c_s/v",
          "note": "Geometric cone structure from causality / interference of emitted wavelets."
        },
        {
          "field_a_term": "Frank–Tamm emission intensity vs frequency (Frank–Tamm formula)",
          "field_b_term": "N-wave pressure signature on ground under supersonic aircraft",
          "note": "Spectral versus temporal profiles differ; analogy is kinematic cone geometry, not waveform identity."
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRev.52.201",
          "note": "Frank & Tamm (1937) — coherent radiation from fast electrons passing through matter; cone kinematics"
        },
        {
          "doi": "10.1146/annurev.fluid.37.061903.175755",
          "note": "McIntyre (2005) — fluid mechanics of sounding bodies / Mach cone basics (review)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-fluid-mechanics/b-cherenkov-radiation-x-mach-sonic-cone.yaml"
    },
    {
      "id": "b-kelvin-helmholtz-cloud-billows-x-plasma-shear-instability",
      "title": "Kelvin-Helmholtz billows in atmospheric cloud layers and shear-driven modes in magnetized plasmas share the same linear-instability logic: velocity shear converts interface perturbations into growing vortical or wave-like structures, with magnetic tension and compressibility adding plasma-specific stabilizing terms.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The bridge is speculative across observational settings but grounded in shared stability analysis: compare nondimensional growth rates after accounting for density contrast, shear thickness, compressibility, and magnetic-field alignment rather than treating cloud billows and fusion-edge shear modes ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-kh-growth-rate-normalization-predicts-billow-plasma-onset"
      ],
      "communication_gap": "Atmospheric scientists emphasize remote-sensed morphology and stratification, while plasma physicists use dispersion relations and diagnostics from fusion devices.\n",
      "translation_table": [
        {
          "field_a_term": "velocity shear across cloud layers",
          "field_b_term": "ExB or parallel-flow shear in plasma",
          "note": "Both feed perturbation growth; plasma adds electromagnetic restoring forces."
        },
        {
          "field_a_term": "Richardson-number suppression",
          "field_b_term": "magnetic-tension or finite-Larmor-radius suppression",
          "note": "Stabilizers are not identical but play analogous threshold roles."
        },
        {
          "field_a_term": "billow wavelength and roll-up time",
          "field_b_term": "mode number and linear growth rate",
          "note": "Both become measurable outputs for stability comparison."
        }
      ],
      "references": [
        {
          "url": "https://doi.org/10.1017/CBO9780511624063",
          "note": "Drazin and Reid textbook treatment of hydrodynamic stability, including shear instabilities."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-fluid-mechanics/b-kelvin-helmholtz-cloud-billows-x-plasma-shear-instability.yaml"
    },
    {
      "id": "b-sonoluminescence-x-acoustic-cavitation-collapse",
      "title": "Single-bubble sonoluminescence arises when acoustically driven cavitation bubbles undergo violent spherical collapse, heating interior gases to emit broadband light flashes — linking continuum fluid mechanics of Rayleigh–Plesset collapse to extreme transient states where plasma-like ionization physics becomes relevant inside micrometer-scale cavities.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Weakly compressible bubble dynamics concentrate kinetic energy into submicrometer hotspots producing picosecond light pulses — whether emission requires collisional ionization versus chemiluminescence mechanisms remains debated — yet experimental scaling laws tie flash timing and spectra to collapse",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-spectral-linewidth-scales-with-collapse-shock-mach-estimate"
      ],
      "communication_gap": "Cavitation engineers worry about impeller erosion while plasma physicists discuss Z-pinch scaling — sonoluminescence sits between communities with separate instrumentation traditions (hydrophones vs streak spectroscopy).\n",
      "translation_table": [
        {
          "field_a_term": "Rayleigh–Plesset radius dynamics under sinusoidal driving pressure",
          "field_b_term": "radial implosion velocity leading to peak temperatures at minimum radius",
          "note": "Fluid mechanics supplies collapse kinematics feeding boundary conditions for interior gas models."
        },
        {
          "field_a_term": "Mach-number-like shell convergence during rebound shocks inside bubble",
          "field_b_term": "weakly ionized plasma diagnostics terminology used for interior states",
          "note": "Plasma vocabulary useful phenomenologically; degree of ionization remains experiment-dependent."
        },
        {
          "field_a_term": "broadband optical spectra / picosecond pulse widths",
          "field_b_term": "thermal bremsstrahlung versus molecular emission hypotheses in cavitation literature",
          "note": "Mechanistic emission bridge remains contested; cross-field debate motivates unified diagnostics."
        }
      ],
      "references": [
        {
          "doi": "10.1103/RevModPhys.74.425",
          "note": "Brenner, Hilgenfeldt & Lohse (2002) — single-bubble sonoluminescence review (Rev. Mod. Phys.)"
        },
        {
          "doi": "10.1038/378557a0",
          "note": "Gaitan et al. (1992) — experimental SBSL demonstration establishing reproducible flashes (Nature)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-fluid-mechanics/b-sonoluminescence-x-acoustic-cavitation-collapse.yaml"
    },
    {
      "id": "b-atmospheric-convection-x-rayleigh-benard",
      "title": "Atmospheric Convection x Rayleigh-Bénard — cumulus clouds as convective cells\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Cumulus cloud formation and thunderstorm organization follow Rayleigh-Bénard convection dynamics above the critical Rayleigh number Ra_c = 1708; convective available potential energy (CAPE) is the atmospheric analogue of the thermal driving force, and mesoscale convective systems self-organize into ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Laboratory fluid dynamicists studied Rayleigh-Bénard convection as a controlled system for studying turbulence onset; atmospheric scientists developed cumulus parameterization empirically from radiosonde observations; the quantitative connection (CAPE as Ra analog) is taught in meteorology but the transfer of RB turbulence theory to atmospheric parameterizations remains incomplete.\n",
      "translation_table": [
        {
          "field_a_term": "Convective available potential energy (CAPE)",
          "field_b_term": "Rayleigh number (thermal buoyancy forcing)",
          "note": "CAPE (J/kg) measures the integrated buoyancy of a rising air parcel; the Rayleigh number Ra = gαΔTL³/(νκ) measures thermal buoyancy relative to viscous and thermal diffusion; both are dimensionless measures of the driving force for convection.\n"
        },
        {
          "field_a_term": "Cumulus convection onset",
          "field_b_term": "Rayleigh-Bénard instability at Ra > Ra_c",
          "note": "The convective inhibition energy (CIN) is the atmospheric barrier to convection onset, analogous to the subcritical stability of Rayleigh-Bénard below Ra_c = 1708; convection initiates when CAPE exceeds CIN.\n"
        },
        {
          "field_a_term": "Mesoscale convective organization (hexagonal patterns)",
          "field_b_term": "Bénard convection cells above onset",
          "note": "Organized convection in the tropics self-arranges into mesoscale structures (200-2000 km) resembling Bénard cells; the aspect ratio of convective cells is determined by the atmospheric layer depth, mirroring laboratory convection.\n"
        },
        {
          "field_a_term": "Entrainment and detrainment in cumulus plumes",
          "field_b_term": "Entrainment rate in turbulent convective plumes",
          "note": "Atmospheric cumulus parameterization uses plume entrainment rates from turbulent Rayleigh-Bénard experiments; Morton-Taylor-Turner (1956) plume theory underlies both laboratory and atmospheric convection parameterizations.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1017/S0022112000001373",
          "note": "Getling (1998) — Rayleigh-Bénard Convection: Structures and Dynamics; World Scientific"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-geology/b-atmospheric-convection-x-rayleigh-benard.yaml"
    },
    {
      "id": "b-mantle-rheology-x-viscoelasticity",
      "title": "Mantle Rheology x Viscoelasticity - Earth's interior as Maxwell fluid\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The Earth's mantle behaves as a Newtonian viscous fluid on geological timescales (glacial isostatic adjustment, eta ~ 10^21 Pa*s) but as an elastic solid on seismic timescales; this Maxwell viscoelastic behavior - with relaxation time tau = eta/G - means mantle rheology is the same physics as silly ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Polymer physics developed Maxwell viscoelasticity for polymer melts (Rouse, Zimm, reptation models); geodynamics independently characterized mantle rheology through glacial rebound studies; cross-fertilization has been limited despite describing the same physics at vastly different scales.\n",
      "translation_table": [
        {
          "field_a_term": "Mantle viscosity (eta ~ 10^21 Pa*s, lower mantle)",
          "field_b_term": "Viscosity in Maxwell viscoelastic model",
          "note": "The Maxwell model has stress relaxation time tau = eta/G; for the mantle, eta = 10^21 Pa*s and G ~ 10^11 Pa gives tau ~ 10^10 s ~ 300 years, consistent with glacial isostatic adjustment timescales.\n"
        },
        {
          "field_a_term": "Seismic waves (elastic response, milliseconds)",
          "field_b_term": "Elastic limit of Maxwell model (t << tau)",
          "note": "At timescales much shorter than the relaxation time, the Maxwell fluid responds elastically with shear modulus G; seismic waves (period 1-100 s) << tau (300 years) so the mantle is elastic for seismology.\n"
        },
        {
          "field_a_term": "Glacial isostatic adjustment (viscous rebound, 10^3-10^4 years)",
          "field_b_term": "Viscous flow limit of Maxwell model (t >> tau)",
          "note": "At timescales >> tau, the Maxwell fluid flows viscously; postglacial rebound (Scandinavia, North America) occurs over 10,000 years >> tau, so viscous flow equations apply.\n"
        },
        {
          "field_a_term": "Post-seismic deformation (days to decades after earthquake)",
          "field_b_term": "Maxwell model transient response (t ~ tau)",
          "note": "The transient viscoelastic response of the mantle after large earthquakes occurs over timescales comparable to tau, requiring the full Maxwell model rather than purely elastic or viscous approximations.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1146/annurev.earth.36.031207.124120",
          "note": "Karato (2008) - deformation of Earth materials: rheology of Earth's interior; Annual Rev Earth Planet Sci"
        },
        {
          "doi": "10.1029/2004GL021186",
          "note": "Mitrovica & Forte (2004) - new inference of mantle viscosity from joint inversion of GIA data; GRL"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-geology/b-mantle-rheology-x-viscoelasticity.yaml"
    },
    {
      "id": "b-plate-tectonics-x-convection",
      "title": "Plate tectonics x Mantle convection - lithospheric plates as convective cells\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Plate tectonics is the surface expression of thermally driven mantle convection; subducting slabs are the cold, dense downwellings and mid-ocean ridges are upwellings in a Rayleigh-Benard convection cell with Rayleigh number Ra ~ 10^7 — far exceeding the critical Ra ~ 10^3 for convection onset, maki",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Geologists studying continental drift (Wegener 1912) and fluid dynamicists studying Rayleigh-Benard convection (Benard 1900) developed parallel frameworks; the connection was made by Holmes (1931) proposing convection as the driver of drift, and quantified by mantle convection models (McKenzie 1968) — but the question of whether plates drive mantle flow or vice versa remains debated, reflecting the difficulty of coupling surface observations to interior dynamics.\n",
      "translation_table": [
        {
          "field_a_term": "tectonic plate motion (geoscience)",
          "field_b_term": "convective flow in Rayleigh-Benard convection (fluid mechanics)",
          "note": "Plates are the surface manifestation of convective cells; plate velocity is the surface flow velocity of the convecting mantle"
        },
        {
          "field_a_term": "subducting oceanic slab (geoscience)",
          "field_b_term": "cold, dense downwelling plume in convection (fluid mechanics)",
          "note": "Subducting slabs are the cold downwellings; their negative buoyancy provides the main driving force for plate motion (slab pull)"
        },
        {
          "field_a_term": "mid-ocean ridge spreading (geoscience)",
          "field_b_term": "hot upwelling in Rayleigh-Benard convection (fluid mechanics)",
          "note": "Ridge spreading centers are upwellings; however, ridges are largely passive — driven by slab pull, not active upwelling"
        },
        {
          "field_a_term": "mantle viscosity eta ~ 10^21 Pa·s (geophysics)",
          "field_b_term": "high-viscosity fluid in Stokes flow regime (fluid mechanics)",
          "note": "Mantle flow is at Re ~ 10^-20 (Stokes regime); viscosity controls the Ra and thus convection style (sluggish vs. vigorous)"
        }
      ],
      "references": [
        {
          "doi": "10.1016/j.pepi.2011.04.001",
          "note": "Tackley (2000/2011) - Modelling compressible mantle convection with large viscosity contrasts; PEPI — high-Ra mantle convection"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-geology/b-plate-tectonics-x-convection.yaml"
    },
    {
      "id": "b-seismic-wave-x-elastic-wave",
      "title": "Seismic waves ↔ Elastic wave theory — P and S waves as Navier equation solutions",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Seismic body waves (P-waves and S-waves) are solutions of the Navier elastodynamic equation in a heterogeneous elastic solid; wave speed ratios (Vp/Vs) reveal rock type and fluid content via Biot-Gassmann relations, making seismology an application of elastic wave physics to planetary-scale media.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-seismic-wave-x-elastic-wave"
      ],
      "communication_gap": "Seismology developed as an observational science from earthquake recordings (Richter, Gutenberg, early 20th century) and independently developed wave propagation theory. Continuum mechanics and elastic wave theory were developed in mathematical physics (Cauchy, Navier, 19th century). These communities merged gradually through applied geophysics (oil exploration seismics), but seismologists and mechanical engineers rarely collaborate on earthquake-resistant structure design despite using identical governing equations.",
      "translation_table": [
        {
          "field_a_term": "P-wave (compressional seismic wave, particle motion parallel to propagation)",
          "field_b_term": "longitudinal elastic wave (dilatational, irrotational) in Navier equation",
          "note": "Vp = √((λ+2μ)/ρ); λ,μ = Lamé parameters; P-waves transmit through liquids"
        },
        {
          "field_a_term": "S-wave (shear seismic wave, particle motion transverse to propagation)",
          "field_b_term": "transverse elastic wave (equivoluminal, rotational) in Navier equation",
          "note": "Vs = √(μ/ρ); Vs = 0 in fluids (μ = 0) — explains S-wave shadow zone at Earth's core"
        },
        {
          "field_a_term": "Vp/Vs ratio (seismic wave speed ratio) in rock physics",
          "field_b_term": "Poisson ratio ν = (Vp/Vs)² - 2) / (2(Vp/Vs)² - 2) in elastic theory",
          "note": "ν = 0.5 for water-saturated rock (incompressible); ν < 0.3 for dry rock"
        },
        {
          "field_a_term": "Biot-Gassmann fluid substitution (changing pore fluid changes Vp)",
          "field_b_term": "Biot effective medium theory for porous elastic solids with fluid",
          "note": "Gassmann equations predict Vp change when oil is replaced by water or gas — seismic 4D monitoring"
        }
      ],
      "references": [
        {
          "doi": "10.1785/BSSA0480030105",
          "note": "Biot (1956) — theory of propagation of elastic waves in a fluid-saturated porous solid; JASA 28:168"
        },
        {
          "doi": "10.1190/1.9781560801931",
          "note": "Sheriff & Geldart (1995) — Exploration Seismology; Cambridge — textbook on seismic wave physics"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-geology/b-seismic-wave-x-elastic-wave.yaml"
    },
    {
      "id": "b-self-organized-criticality-x-earthquake",
      "title": "Self-organized criticality x Earthquake statistics — Gutenberg-Richter as SOC\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The Gutenberg-Richter power law for earthquake frequency-magnitude distributions is the signature of self-organized criticality in the Earth's crust; the crust self-tunes to the critical state without external parameter adjustment, exactly as in the Bak-Tang-Wiesenfeld sandpile model.\n",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Seismology developed empirically from the 1900s; SOC theory was formulated by Bak, Tang & Wiesenfeld in 1987. The connection was proposed early but quantitative tests of the SOC model for seismicity remain contested.\n",
      "translation_table": [
        {
          "field_a_term": "BTW sandpile slope (maintained at critical angle)",
          "field_b_term": "Earth's crust at critical stress state",
          "note": "Both systems self-organize to a marginally stable critical state; in the crust tectonic loading drives the system back toward criticality after each event.\n"
        },
        {
          "field_a_term": "Sandpile avalanche size distribution (power law)",
          "field_b_term": "Gutenberg-Richter earthquake magnitude distribution",
          "note": "Both follow power laws N(s) ~ s^(-tau); the GR exponent b = 1 corresponds to the BTW avalanche exponent tau = 1.5 in 2D (with appropriate rescaling).\n"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.59.381",
          "note": "Bak, Tang & Wiesenfeld (1987) — self-organized criticality: an explanation of 1/f noise"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-geology/b-self-organized-criticality-x-earthquake.yaml"
    },
    {
      "id": "b-thermohaline-circulation-x-buoyancy-flow",
      "title": "Ocean Thermohaline Circulation x Density-Driven Flow — AMOC as buoyancy-forced conveyor\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The Atlantic meridional overturning circulation (AMOC) is driven by density differences (temperature and salinity gradients) that create a pressure-gradient force; the Stommel two-box model shows AMOC has two stable equilibria (strong/weak) separated by a saddle-node bifurcation — a tipping point in",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Physical oceanographers studying AMOC developed box models and GCMs; dynamical systems theorists studied bifurcations in fluid systems; the tipping point framing (Lenton et al. 2008) brought these communities together, but quantitative saddle-node analysis of AMOC with observational constraints remains an active frontier.\n",
      "translation_table": [
        {
          "field_a_term": "Thermohaline density difference (Δρ)",
          "field_b_term": "Buoyancy forcing in geophysical fluid dynamics",
          "note": "Ocean density ρ(T,S) depends on temperature and salinity; poleward surface cooling increases density, driving sinking and deep water formation — the engine of AMOC.\n"
        },
        {
          "field_a_term": "AMOC transport strength (Sv = 10⁶ m³/s)",
          "field_b_term": "Flow rate in driven buoyancy convection",
          "note": "AMOC transports ~18 Sv of water northward in the upper Atlantic; this is equivalent to 100 Amazon rivers and represents a dominant mechanism of poleward heat transport in the climate system.\n"
        },
        {
          "field_a_term": "Stommel two-box bifurcation",
          "field_b_term": "Saddle-node bifurcation in ODE system",
          "note": "The Stommel model (1961) has two stable solutions: strong AMOC (temperature- dominated) and weak AMOC (salinity-dominated); these are separated by a saddle-node bifurcation at a critical freshwater flux threshold.\n"
        },
        {
          "field_a_term": "Freshwater forcing (melting ice sheets)",
          "field_b_term": "Bifurcation parameter controlling equilibrium branch",
          "note": "Increased freshwater input (e.g., Greenland melt) reduces North Atlantic salinity, potentially pushing the system past the saddle-node bifurcation to the weak AMOC state — a climate tipping point with global consequences.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1111/j.2153-3490.1961.tb00079.x",
          "note": "Stommel (1961) — Thermohaline convection with two stable regimes of flow; Tellus 13:224"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-geology/b-thermohaline-circulation-x-buoyancy-flow.yaml"
    },
    {
      "id": "b-entropy-arrow-of-time",
      "title": "Thermodynamic entropy increase, Landauer's information-erasure bound, and the cosmological arrow of time are three faces of the same asymmetry — a unified account requires identifying which low-entropy boundary condition (past hypothesis, Penrose's Weyl curvature, quantum decoherence) breaks time-reversal invariance at each scale.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Three apparently separate arrows of time — thermodynamic (entropy increases), computational (Landauer: erasing one bit dissipates at least k_B T ln 2 of heat), and cosmological (the universe began in an extraordinarily low-entropy state) — are governed by the same underlying asymmetry.\nLandauer's pr",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-landauer-cosmological-arrow"
      ],
      "communication_gap": "Thermodynamics, cosmology, and information theory each have their own arrow-of-time literature with almost no cross-citation. Cosmologists rarely engage with Landauer's principle; information theorists rarely engage with Penrose's Weyl curvature hypothesis; thermodynamicists often treat the arrow as a solved problem (second law). Bridging these requires comfort in all three fields simultaneously.\n",
      "translation_table": [
        {
          "field_a_term": "entropy S = k_B ln W (Boltzmann)",
          "field_b_term": "information H = -sum p_i log p_i (Shannon)",
          "note": "S = k_B ln 2 * H; the two quantities differ only by units"
        },
        {
          "field_a_term": "thermodynamic irreversibility (dS >= 0)",
          "field_b_term": "logical irreversibility (bit erasure)",
          "note": "Landauer's principle makes this equivalence exact and experimentally testable"
        },
        {
          "field_a_term": "low-entropy Big Bang initial condition",
          "field_b_term": "low-Kolmogorov-complexity initial state of the universe",
          "note": "The past hypothesis in thermodynamics corresponds to high algorithmic compressibility"
        },
        {
          "field_a_term": "Weyl curvature = 0 at t=0 (Penrose)",
          "field_b_term": "gravitational degrees of freedom not yet entangled with matter",
          "note": "Penrose's CCC links this to quantum gravity"
        },
        {
          "field_a_term": "Maxwell's demon defeated by Szilard-Landauer argument",
          "field_b_term": "measurement + memory erasure fully accounts for demon's thermodynamic budget"
        }
      ],
      "references": [
        {
          "doi": "10.1147/rd.53.0183",
          "note": "Landauer (1961) - irreversibility and heat generation in the computing process"
        },
        {
          "doi": "10.1017/CBO9780511622717",
          "note": "Penrose (1989) - The Emperor's New Mind; Weyl curvature hypothesis"
        },
        {
          "doi": "10.1038/nature10872",
          "note": "Berut et al. (2012) - Experimental verification of Landauer's principle"
        },
        {
          "arxiv": "quant-ph/0306044",
          "note": "Bennett (2003) - Notes on the history of reversible computation"
        },
        {
          "doi": "10.1093/acprof:oso/9780199237555.001.0001",
          "note": "Penrose (2010) - Cycles of Time; CCC cosmology"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/physics-information/b-entropy-arrow-of-time.yaml"
    },
    {
      "id": "b-landauer-information-thermodynamics",
      "title": "Landauer's principle ↔ thermodynamic cost of information erasure (Maxwell's demon resolution)",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Landauer (1961) proved that erasing one bit of information in a thermal environment at temperature T requires dissipating at least k_B * T * ln(2) of free energy as heat — approximately 3 zJ at room temperature.  This resolves Maxwell's demon paradox (1867): the demon cannot violate the second law b",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-brain-landauer-efficiency"
      ],
      "communication_gap": "Landauer's 1961 paper appeared in IBM Journal of Research and Development — an engineering journal — and was largely ignored by both physicists and computer scientists for two decades.  Shannon's information theory community knew about entropy but not the physical cost of erasure.  Thermodynamicists knew about the second law but not that information erasure was the key mechanism.  The bridge was not fully established until Bennett's 1982 paper, 21 years after Landauer.\n",
      "translation_table": [
        {
          "field_a_term": "entropy decrease in gas (demon sorts molecules)",
          "field_b_term": "information gain (1 bit per measurement)",
          "note": "Szilard (1929) established this equivalence"
        },
        {
          "field_a_term": "free energy cost of erasing memory",
          "field_b_term": "k_B * T * ln(2) per bit erased",
          "note": "The Landauer limit — exact lower bound"
        },
        {
          "field_a_term": "logically irreversible operation (AND, OR, ERASE)",
          "field_b_term": "thermodynamically irreversible — must dissipate heat",
          "note": "NAND gate erases 1 bit; minimum cost k_B T ln 2"
        },
        {
          "field_a_term": "logically reversible computation (Toffoli gate)",
          "field_b_term": "thermodynamically reversible — zero minimum dissipation",
          "note": "Bennett (1973) proved reversible universal computation possible"
        },
        {
          "field_a_term": "second law of thermodynamics",
          "field_b_term": "Shannon entropy cannot decrease for free",
          "note": "Mutual information = negative entropy in Szilard engine"
        }
      ],
      "references": [
        {
          "doi": "10.1147/rd.53.0183",
          "note": "Landauer (1961) — original paper: irreversibility and heat generation"
        },
        {
          "doi": "10.1038/nature10872",
          "note": "Berut et al. (2012) Nature — experimental verification at 1% of Landauer limit"
        },
        {
          "doi": "10.1147/rd.173.0525",
          "note": "Bennett (1973) — logical reversibility of computation"
        },
        {
          "doi": "10.1038/scientificamerican1182-48",
          "note": "Bennett (1982) — thermodynamics of computation review"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/physics-information/b-landauer-information-thermodynamics.yaml"
    },
    {
      "id": "b-defects-mechanical-strength",
      "title": "The mechanical strength of crystalline materials is governed entirely by dislocation physics: Taylor hardening (τ ∝ √ρ), the Hall-Petch grain-size effect (σ_y ∝ d⁻¹/²), and Orowan precipitate strengthening reduce all strength-of-materials to the statistical mechanics of dislocation ensembles in a periodic lattice.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A perfect crystal is theoretically very strong: theoretical shear strength τ_th ≈ Gb/(2πa) ≈ G/30 where G is shear modulus (~40 GPa for steel) and a is lattice spacing. Real iron fails at τ ~ 50 MPa — three orders of magnitude weaker — because of dislocations: line defects characterised by their Bur",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-dislocation-density-taylor-hardening-md-validation"
      ],
      "communication_gap": "Dislocation physics was developed by physicists (Taylor, Orowan, Burgers, Nabarro) but published primarily in materials science and metallurgy journals. Condensed matter physicists trained post-1970 typically do not study dislocation theory (it is not in standard graduate curricula), even though it is one of the most successful applications of solid-state physics to engineering. Materials engineers use Taylor/Hall-Petch empirically without connecting to the statistical mechanics derivations that would enable prediction in new alloy systems.\n",
      "translation_table": [
        {
          "field_a_term": "Burgers vector b of edge/screw dislocation",
          "field_b_term": "unit of plastic strain per dislocation glide event",
          "note": "b = a/2⟨110⟩ in FCC metals; b = a/2⟨111⟩ in BCC — determines slip geometry"
        },
        {
          "field_a_term": "dislocation density ρ (lines per m²)",
          "field_b_term": "degree of work hardening / stored plastic strain energy",
          "note": "Annealed Cu: ρ ~ 10¹⁰ m⁻²; cold-worked: ρ ~ 10¹⁵ m⁻²"
        },
        {
          "field_a_term": "Taylor factor M relating resolved shear to macroscopic stress",
          "field_b_term": "crystallographic texture effect on polycrystal yield stress",
          "note": "M = σ_macro/τ_resolved ~ 3.06 for random FCC texture; texture affects strength"
        },
        {
          "field_a_term": "grain boundary as dislocation pile-up barrier",
          "field_b_term": "Hall-Petch k_y coefficient (grain boundary strengthening efficacy)",
          "note": "k_y depends on grain boundary energy and dislocation core structure"
        },
        {
          "field_a_term": "Orowan loop left behind precipitate",
          "field_b_term": "precipitation hardening back-stress and fatigue crack nucleation",
          "note": "Orowan loops accumulate → back-stress → Bauschinger effect in cyclic loading"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rspa.1934.0106",
          "note": "Taylor (1934) The mechanism of plastic deformation of crystals. Proc R Soc A 145:362"
        },
        {
          "doi": "10.1088/0370-1301/64/9/303",
          "note": "Hall (1951) The deformation and ageing of mild steel. Proc Phys Soc B 64:747"
        },
        {
          "note": "Petch (1953) The cleavage strength of polycrystals. J Iron Steel Inst 174:25"
        },
        {
          "doi": "10.1088/0034-4885/12/1/309",
          "note": "Orowan (1948) Discussion in Symposium on Internal Stresses in Metals and Alloys. Rep Prog Phys 12:185"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-materials-science/b-defects-mechanical-strength.yaml"
    },
    {
      "id": "b-dislocations-crystal-plasticity",
      "title": "Dislocations (line defects in crystalline lattices) are the microscopic mechanism of plastic deformation in metals — dislocation glide requires far less stress than shearing a perfect crystal (Taylor 1934), connecting continuum plastic flow mechanics to atomic-scale crystal structure through the dislocation density tensor.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "PERFECT CRYSTAL PROBLEM: The theoretical shear strength of a perfect crystal is τ_theory = G/2π ≈ G/6, where G is the shear modulus. For copper, τ_theory ≈ 4 GPa. Observed yield stress: ~1 MPa — a factor of 1000 lower.\nDISLOCATION RESOLUTION (Taylor, Orowan, Polyani 1934): A dislocation is a line de",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-dislocation-based-plasticity-hea-alloys"
      ],
      "communication_gap": "Materials scientists and condensed matter physicists know dislocation theory well. Continuum mechanicians working on plasticity often treat it as a black box without microscopic justification. Crystal plasticity finite element methods (CPFEM) bridge both but require expertise in both fields.\n",
      "translation_table": [
        {
          "field_a_term": "Burgers vector b (crystallography)",
          "field_b_term": "plastic strain increment per dislocation glide event",
          "note": "The Burgers vector is both the topological invariant of the defect and the unit of plastic deformation"
        },
        {
          "field_a_term": "dislocation density rho (density of line defects per m^2)",
          "field_b_term": "internal variable in continuum plasticity theory",
          "note": "rho appears in hardening laws; equivalent to backstress in kinematic hardening models"
        },
        {
          "field_a_term": "Peierls-Nabarro stress (lattice friction)",
          "field_b_term": "yield stress in continuum plasticity",
          "note": "τ_PN sets the single-crystal yield stress before work hardening; polycrystal yield requires Taylor factor"
        },
        {
          "field_a_term": "dislocation core structure",
          "field_b_term": "strain energy density at singularity",
          "note": "Core energy must be regularised in continuum field theories — exactly as elastic field theories regularise point sources"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rspa.1934.0106",
          "note": "Taylor (1934) Proc R Soc A 145:362 — first dislocation theory of plasticity"
        },
        {
          "note": "Orowan (1934) Z Phys 89:634 — independent dislocation proposal"
        },
        {
          "note": "Peierls (1940) Proc Phys Soc 52:34 — Peierls stress derivation"
        },
        {
          "note": "Hirth & Lothe (1982) Theory of Dislocations, Wiley — comprehensive treatment"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-materials-science/b-dislocations-crystal-plasticity.yaml"
    },
    {
      "id": "b-topological-materials-band-theory",
      "title": "Topological insulators host bulk band gaps alongside surface/edge states protected by time-reversal symmetry, characterized by the ℤ₂ topological invariant and Chern number C = (1/2π)∫_{BZ} Ω_k dk — a quantized topological invariant that predicts the quantum anomalous Hall conductance σ_xy = Ce²/h without free parameters.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Topological insulators (TIs) are materials whose electronic band structure has a bulk gap (like a conventional insulator) but whose surface or edge hosts gapless, conducting states protected by time-reversal symmetry (TRS). These surface states cannot be removed by any TRS-preserving perturbation wi",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-topological-insulator-majorana-fault-tolerant-qubit"
      ],
      "communication_gap": "Topological materials theory requires algebraic topology (fiber bundles, K-theory, cohomology) that is not taught in standard physics programs. The mathematical machinery was known to mathematicians (Atiyah-Singer index theorem, 1963) but its application to condensed matter required the insight of Thouless et al. (1982) that band structure invariants are topological. The physics literature rarely cites the mathematical topology literature (K-theory of C*-algebras, Kitaev periodic table) despite using equivalent results.\n",
      "translation_table": [
        {
          "field_a_term": "Chern number C = (1/2π)∫_{BZ} Ω_k dk",
          "field_b_term": "integer topological invariant from Berry curvature integral",
          "note": "Identical to TKNN invariant (Thouless et al. 1982) for quantum Hall effect"
        },
        {
          "field_a_term": "Berry curvature Ω_n(k) in Brillouin zone",
          "field_b_term": "curvature 2-form on a U(1) fiber bundle over momentum space",
          "note": "Topological in the sense of fiber bundle topology (mathematics)"
        },
        {
          "field_a_term": "ℤ₂ topological invariant ν",
          "field_b_term": "parity of Chern number (even → trivial, odd → topological)",
          "note": "ν = 0 (trivial) vs. ν = 1 (topological) for 2D TRS-symmetric insulators"
        },
        {
          "field_a_term": "topologically protected surface states",
          "field_b_term": "zero modes of Atiyah-Singer index theorem",
          "note": "Bulk-boundary correspondence is an index theorem — bulk topology forces surface modes"
        },
        {
          "field_a_term": "Weyl node (Berry curvature monopole)",
          "field_b_term": "magnetic monopole in k-space (Dirac monopole, charge ±1)",
          "note": "Fermi arcs connecting Weyl nodes are the k-space analog of Dirac strings"
        },
        {
          "field_a_term": "Majorana zero mode at TI-superconductor interface",
          "field_b_term": "non-Abelian anyon (Ising anyon) for topological quantum computation",
          "note": "Braiding Majoranas implements fault-tolerant quantum gates"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.95.146802",
          "note": "Kane & Mele (2005) Phys Rev Lett 95:146802 — ℤ₂ topological insulator in graphene"
        },
        {
          "doi": "10.1126/science.1133734",
          "note": "Bernevig, Hughes & Zhang (2006) Science 314:1757 — prediction of HgTe TI"
        },
        {
          "doi": "10.1103/RevModPhys.82.3045",
          "note": "Hasan & Kane (2010) Rev Mod Phys 82:3045 — topological insulators review"
        },
        {
          "doi": "10.1103/RevModPhys.90.015001",
          "note": "Armitage, Mong & Vishwanath (2018) Rev Mod Phys 90:015001 — Weyl and Dirac semimetals"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-materials-science/b-topological-materials-band-theory.yaml"
    },
    {
      "id": "b-acoustic-metamaterials-x-negative-refraction",
      "title": "Acoustic Metamaterials x Negative Refraction — locally resonant structures as effective medium\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Acoustic metamaterials with locally resonant inclusions (rubber-coated lead spheres) exhibit simultaneously negative effective mass density and bulk modulus near resonance, producing negative refraction and sound focusing below the diffraction limit; the effective medium is described by Z-transform ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Electromagnetic metamaterials (Veselago 1968, Pendry 2000) were developed 30 years before acoustic metamaterials (Liu 2000); physicists working on EM metamaterials and acoustic metamaterials used the same mathematical framework but in separate communities; the cross-community transfer of negative-index design principles accelerated acoustic metamaterial development by ~10 years when recognized.\n",
      "translation_table": [
        {
          "field_a_term": "Local resonance frequency ω₀ of inclusion",
          "field_b_term": "Frequency at which effective mass density ρ_eff < 0",
          "note": "Below resonance, the inclusion moves in phase with the host; above resonance, it moves out of phase, producing negative effective inertia (ρ_eff < 0) — analogous to negative permittivity in electromagnetic metamaterials near plasma resonance.\n"
        },
        {
          "field_a_term": "Negative effective bulk modulus κ_eff < 0",
          "field_b_term": "Monopole resonance of air-filled bubble inclusion",
          "note": "Air bubbles in water have monopole resonance (breathing mode) that produces negative effective compressibility in a frequency band — the acoustic analogue of negative magnetic permeability in electromagnetic split-ring resonators.\n"
        },
        {
          "field_a_term": "Double-negative band (ρ_eff < 0, κ_eff < 0)",
          "field_b_term": "Negative acoustic refractive index",
          "note": "When both effective parameters are negative, the acoustic refractive index n = -√(ρ_eff κ_eff / ρ₀κ₀) is negative, causing Snell's law refraction with reversed sign — enabling flat acoustic lenses and cloaking.\n"
        },
        {
          "field_a_term": "Effective medium theory (long-wavelength limit)",
          "field_b_term": "Homogenization of periodic inclusion composite",
          "note": "When the inclusion size a << wavelength λ, the composite is described by effective medium parameters ρ_eff(ω) and κ_eff(ω); this homogenization is the acoustic analogue of Clausius-Mossotti polarizability theory.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.289.5485.1734",
          "note": "Liu et al. (2000) — Locally resonant sonic materials; Science 289:1734"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-math/b-acoustic-metamaterials-x-negative-refraction.yaml"
    },
    {
      "id": "b-conformal-field-theory-x-critical-phenomena",
      "title": "Conformal Field Theory x Critical Phenomena - scale invariance as symmetry\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "At a second-order phase transition, the system's scaling symmetry enhances to full conformal symmetry (invariant under angle-preserving maps); conformal field theory (CFT) classifies all possible universality classes of 2D critical phenomena exactly and constrains 3D critical exponents via the confo",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Statistical physicists and mathematical physicists developed parallel tools (RG, transfer matrices vs Virasoro algebras, modular invariance) that were unified only through the BPZ paper (1984); 3D CFT is still less understood than 2D, with the conformal bootstrap providing new constraints only in the last 15 years.\n",
      "translation_table": [
        {
          "field_a_term": "Universality class (critical exponents alpha, beta, gamma, nu)",
          "field_b_term": "CFT operator content (primary operators and their dimensions)",
          "note": "Each universality class corresponds to a unique CFT; the critical exponents are determined by the scaling dimensions of primary operators, so classifying CFTs is equivalent to classifying universality classes.\n"
        },
        {
          "field_a_term": "Correlation length divergence (xi ~ |T-Tc|^-nu)",
          "field_b_term": "Conformal invariance (long-range correlations at all scales)",
          "note": "At criticality, xi diverges and the system becomes scale-invariant; scale invariance plus unitarity and Lorentz invariance imply full conformal invariance, replacing power-law correlations with exact CFT two-point functions.\n"
        },
        {
          "field_a_term": "Wilson-Fisher fixed point (epsilon expansion)",
          "field_b_term": "Non-trivial CFT fixed point (anomalous dimensions)",
          "note": "The Wilson-Fisher fixed point of the RG flow corresponds to an interacting CFT; the anomalous dimensions of fields at the fixed point give the critical exponents.\n"
        },
        {
          "field_a_term": "Transfer matrix (1D quantum chain)",
          "field_b_term": "Virasoro algebra (2D CFT)",
          "note": "The transfer matrix of a 2D classical lattice model equals the Hamiltonian of a 1D quantum chain; at criticality, this Hamiltonian has Virasoro symmetry, making 2D critical phenomena exactly solvable via CFT.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0550-3213(84)90052-X",
          "note": "Belavin, Polyakov & Zamolodchikov (1984) - infinite conformal symmetry in 2D quantum field theory; Nucl Phys B 241:333"
        },
        {
          "doi": "10.1103/PhysRevD.86.025022",
          "note": "Rychkov & Tonni (2009) - conformal bootstrap in 3D; constraining the 3D Ising model critical exponents"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-math/b-conformal-field-theory-x-critical-phenomena.yaml"
    },
    {
      "id": "b-crystallography-x-group-theory",
      "title": "Crystallography x Group Theory — space groups as symmetry classification\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "All possible crystal structures are classified by the 230 space groups — subgroups of the Euclidean group in 3D; group representation theory predicts allowed phonon modes, electronic band degeneracies, and optical selection rules from symmetry alone, making group theory the complete predictive langu",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Crystallographers enumerated the 230 space groups by exhaustive geometric arguments (Fedorov, Schoenflies, Barlow 1891) before abstract group theory was systematized; physicists applied representation theory to crystals (Bethe 1929, Wigner 1930) as a separate development; unified group-theoretic crystallography texts appeared only in the 1950s-60s.\n",
      "translation_table": [
        {
          "field_a_term": "Crystal space group",
          "field_b_term": "Subgroup of the Euclidean group E(3)",
          "note": "The 230 space groups are all distinct subgroups of E(3) (rotations, reflections, translations, screw axes, glide planes); Schoenflies and Hermann-Mauguin notation encodes the group generators.\n"
        },
        {
          "field_a_term": "Phonon modes at high-symmetry k-points",
          "field_b_term": "Irreducible representations of the little group",
          "note": "The symmetry of phonon modes is determined by the irreducible representations of the little group (stabilizer of k-point under the space group); mode degeneracy equals representation dimension.\n"
        },
        {
          "field_a_term": "Optical selection rules",
          "field_b_term": "Wigner-Eckart theorem for photon interaction",
          "note": "Raman-active and infrared-active modes are determined by whether the phonon representation appears in the decomposition of the photon-lattice interaction; group theory predicts this without calculating matrix elements.\n"
        },
        {
          "field_a_term": "Band degeneracy at high-symmetry points",
          "field_b_term": "Dimension of electronic band representation",
          "note": "Multi-dimensional irreducible representations force band degeneracies; accidental degeneracies (band crossings) require fine-tuning unless protected by symmetry.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1107/S0108767383000842",
          "note": "Hahn (1983) — International Tables for Crystallography, Vol. A: Space-group symmetry"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-math/b-crystallography-x-group-theory.yaml"
    },
    {
      "id": "b-neutron-star-x-nuclear-matter",
      "title": "Neutron Star x Nuclear Matter — dense stellar interiors as cold Fermi liquid\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Neutron star interiors contain nuclear matter at densities exceeding nuclear saturation density (2×10^17 kg/m³); the equation of state is described by Landau Fermi liquid theory with strong nuclear interactions, and neutron star mass-radius measurements directly constrain nuclear symmetry energy — m",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Nuclear physicists study finite nuclei in terrestrial experiments (PREX, CREX); astrophysicists study neutron stars as bulk matter; gravitational wave astronomy (LIGO GW170817 tidal deformability) and X-ray timing (NICER) now connect these communities by providing bulk EOS constraints from neutron star observations.\n",
      "translation_table": [
        {
          "field_a_term": "Neutron star radius R (10-14 km)",
          "field_b_term": "Equation of state (pressure vs. density) of dense nuclear matter",
          "note": "The TOV (Tolman-Oppenheimer-Volkoff) equation relates the mass-radius curve to the nuclear equation of state; stiffer EOS (higher pressure) → larger radius for a given mass.\n"
        },
        {
          "field_a_term": "Nuclear symmetry energy E_sym",
          "field_b_term": "Difference in energy between pure neutron and symmetric nuclear matter",
          "note": "The symmetry energy slope parameter L determines neutron star radius and the neutron skin thickness of heavy nuclei; PREX-II (Pb skin) and NICER (neutron star radius) measurements provide complementary nuclear constraints.\n"
        },
        {
          "field_a_term": "Fermi liquid quasiparticle effective mass m*",
          "field_b_term": "Neutron effective mass in dense neutron star matter",
          "note": "Landau Fermi liquid theory parametrizes nuclear matter via effective mass and Landau parameters; these determine neutron star specific heat, cooling rate, and superfluid gap.\n"
        },
        {
          "field_a_term": "Neutron superfluidity (paired neutrons, triplet p-wave)",
          "field_b_term": "BCS Cooper pairing in dense fermionic matter",
          "note": "Neutrons in the neutron star inner crust pair in the ¹S₀ channel; in the core they form ³P₂ pairs; BCS theory applies at densities ρ ~ 2-5 ρ₀, producing neutron superfluidity that governs pulsar glitches.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1103/RevModPhys.89.015007",
          "note": "Oertel et al. (2017) — Equations of state for supernovae and compact stars; Rev Mod Phys 89:015007"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-math/b-neutron-star-x-nuclear-matter.yaml"
    },
    {
      "id": "b-quantum-decoherence-x-classical-emergence",
      "title": "Quantum Decoherence x Classical Emergence — pointer states as preferred basis\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Quantum decoherence (entanglement with environment) selects preferred classical states (pointer states) that are stable under environmental monitoring; the quantum-to-classical transition is not a collapse postulate but an emergent property of open quantum systems, explained by environment-induced s",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "The measurement problem was treated as philosophical by physicists and ignored by mathematicians; decoherence theory (Zeh 1970, Zurek 1981) was dismissed for decades as not solving the problem, until quantum information theory provided the mathematical framework (quantum channels, Kraus operators) to make decoherence quantitative.\n",
      "translation_table": [
        {
          "field_a_term": "Pointer states (einselection)",
          "field_b_term": "Stable fixed points of the decoherence map",
          "note": "Pointer states are eigenstates of the system-environment interaction Hamiltonian that remain pure despite environmental entanglement; mathematically they are fixed points of the completely positive decoherence superoperator.\n"
        },
        {
          "field_a_term": "Decoherence time τ_D",
          "field_b_term": "Timescale for off-diagonal density matrix decay",
          "note": "Off-diagonal coherences ρ_ij decay exponentially with rate τ_D ∝ (λ_dB/Δx)²; for macroscopic superpositions this is astronomically fast, explaining why Schrödinger cats are never observed.\n"
        },
        {
          "field_a_term": "Quantum Darwinism (redundant encoding)",
          "field_b_term": "Classical objectivity from environmental copies",
          "note": "Classical reality emerges because pointer state information is redundantly encoded in multiple environmental fragments; any observer measuring a small environmental fragment obtains the same classical outcome.\n"
        },
        {
          "field_a_term": "Lindblad master equation",
          "field_b_term": "Markovian open system dynamics",
          "note": "Decoherence dynamics follow the Lindblad equation (GKSL equation) — the most general completely positive, trace-preserving linear map, derived from the mathematics of quantum channels.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1103/RevModPhys.75.715",
          "note": "Zurek (2003) — Decoherence, einselection, and the quantum origins of the classical; Rev Mod Phys 75:715"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-math/b-quantum-decoherence-x-classical-emergence.yaml"
    },
    {
      "id": "b-quantum-field-theory-x-combinatorics",
      "title": "Quantum Field Theory x Combinatorics - Feynman diagrams as graph enumeration\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Feynman diagram perturbation theory is a combinatorial expansion: the n-th order term counts all distinct n-vertex graphs with prescribed external legs, weighted by symmetry factors; the generating function of Feynman diagrams is a formal power series whose coefficients are computed by graph automor",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Feynman developed diagram perturbation theory as a physicist's mnemonic (1948); its combinatorial interpretation (symmetry factors = automorphism group) was recognized gradually; the full algebraic structure (Hopf algebra of renormalization) was only made explicit by Connes & Kreimer (1999), opening connections to algebraic combinatorics.\n",
      "translation_table": [
        {
          "field_a_term": "Feynman diagram (graph representing a scattering process)",
          "field_b_term": "Labeled graph in enumerative combinatorics",
          "note": "Each Feynman diagram is a graph with vertices (interaction events) and edges (propagators); the sum over diagrams at order n is the sum over all non-isomorphic graphs with n vertices and the correct number of external legs, weighted by symmetry factors.\n"
        },
        {
          "field_a_term": "Symmetry factor (1 / |Aut(diagram)|)",
          "field_b_term": "Inverse automorphism group size in graph enumeration",
          "note": "The symmetry factor of a Feynman diagram is the inverse of the order of its automorphism group - exactly the factor appearing in Burnside's lemma for counting labeled graphs modulo symmetries.\n"
        },
        {
          "field_a_term": "Generating functional Z[J] (path integral)",
          "field_b_term": "Exponential generating function of graphs",
          "note": "log Z[J] is the generating function of connected Feynman diagrams; exp(log Z[J]) generates all diagrams (including disconnected ones); this mirrors the exponential formula in combinatorics relating connected and total structures.\n"
        },
        {
          "field_a_term": "Renormalization (removing UV divergences)",
          "field_b_term": "Subtraction of divergent sub-graph contributions (Hopf algebra)",
          "note": "Connes and Kreimer (1999) showed that Feynman diagram renormalization is governed by the Hopf algebra of rooted trees, with the antipode implementing the BPHZ forest formula - a combinatorial identity.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1007/s002200050499",
          "note": "Connes & Kreimer (1999) - renormalization in QFT and the Hopf algebra of rooted trees; CMP 199:203"
        },
        {
          "doi": "10.1007/BF01350282",
          "note": "Cvitanovic (1977) - group theory factors for Feynman diagrams; Physical Review D 14:1536"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-math/b-quantum-field-theory-x-combinatorics.yaml"
    },
    {
      "id": "b-renyi-entropy-x-multifractal",
      "title": "Renyi entropy x Multifractal spectra - generalized entropy as scaling exponent\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The Renyi entropy of order q, H_q = (1/(1-q)) log sum_i p_i^q, generates the full multifractal spectrum f(alpha) via Legendre transform tau(q) -> f(alpha); turbulent velocity fields, strange attractors, and financial return distributions are all multifractals whose scaling spectrum is computed from ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Information theorists developing generalized entropies (Renyi 1961) and physicists studying fractal geometry and chaotic attractors (Mandelbrot, Grassberger, Procaccia — 1980s) developed parallel mathematical frameworks; the synthesis in the multifractal formalism (Halsey et al. 1986) revealed the Legendre transform connection, but finance (multifractal models of asset returns) and genomics (fractal analysis of DNA sequences) rarely cite the Renyi entropy literature.\n",
      "translation_table": [
        {
          "field_a_term": "Renyi entropy H_q of order q (information theory)",
          "field_b_term": "mass exponent tau(q) in multifractal formalism (physics/mathematics)",
          "note": "The Renyi entropy generates the mass exponent tau(q) = (q-1) H_q; the full spectrum f(alpha) follows by Legendre transform"
        },
        {
          "field_a_term": "Shannon entropy H_1 = -sum p_i log p_i (q=1 limit) (information theory)",
          "field_b_term": "information dimension D_1 of a fractal set (fractal geometry)",
          "note": "The q=1 Renyi entropy equals the information dimension; q=0 gives box-counting dimension D_0, q=2 gives correlation dimension D_2"
        },
        {
          "field_a_term": "q-deformed statistics / Tsallis entropy (nonextensive stat mech)",
          "field_b_term": "multifractal measure with Holder exponent alpha (mathematics)",
          "note": "Tsallis entropy is Renyi entropy with different normalization; both parametrize the multifractal spectrum at fixed q"
        },
        {
          "field_a_term": "turbulent energy cascade intermittency (fluid mechanics)",
          "field_b_term": "non-uniform measure on strange attractor / multifractal measure (dynamical systems)",
          "note": "Turbulent velocity increments are multifractal; the spectrum f(alpha) measured via Renyi entropy quantifies intermittency"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevA.33.1141",
          "note": "Halsey et al. (1986) - Fractal measures and their singularities: the characterization of strange sets; Phys Rev A 33:1141"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-math/b-renyi-entropy-x-multifractal.yaml"
    },
    {
      "id": "b-solid-mechanics-x-topology-optimization",
      "title": "Solid Mechanics x Topology Optimization — minimum compliance as material distribution\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Topology optimization (SIMP method) distributes material within a design domain to minimize structural compliance (maximize stiffness) subject to volume constraints; the optimality conditions are equivalent to the adjoint method in PDE-constrained optimization, and solutions exhibit self-similar fra",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Structural engineers developed layout optimization (Michell 1904) and variational methods separately from mathematical programming and PDE-constrained optimization; the SIMP method (Bendsøe & Kikuchi 1988) was developed empirically before its mathematical foundation in homogenization theory and shape calculus was established by Allaire and Sokolowski in the 1990s-2000s.\n",
      "translation_table": [
        {
          "field_a_term": "SIMP density variable ρ(x) ∈ [0,1]",
          "field_b_term": "Characteristic function of material domain (relaxed)",
          "note": "SIMP relaxes the binary material/void problem to continuous density ρ(x); penalization E(ρ) = ρ^p·E₀ (p=3) forces solutions toward 0/1 density, recovering near-discrete material distributions.\n"
        },
        {
          "field_a_term": "Compliance C = f^T·u (structural flexibility)",
          "field_b_term": "Objective functional in variational optimization",
          "note": "Compliance is the strain energy; minimizing compliance maximizes stiffness for given loads; the adjoint equation for sensitivity ∂C/∂ρ requires solving one extra finite element system (adjoint state = displacement state).\n"
        },
        {
          "field_a_term": "Optimality criteria (OC) update rule",
          "field_b_term": "Fixed-point iteration for KKT conditions",
          "note": "The OC update ρ_new = ρ·(−∂C/∂ρ/λ)^(1/2) is a fixed-point iteration for the KKT conditions of the constrained optimization; it converges faster than gradient descent for this specific problem class.\n"
        },
        {
          "field_a_term": "Michell truss (optimal structure theory)",
          "field_b_term": "Continuous limit of topology optimization as λ→0",
          "note": "Michell (1904) proved that optimal trusses have members oriented along principal stress directions; SIMP topology optimization with fine mesh converges to Michell truss-like architectures — connecting continuum and discrete structural optimization.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0045-7825(88)90086-2",
          "note": "Bendsøe & Kikuchi (1988) — Generating optimal topologies in structural design using homogenization; Comput Methods Appl Mech Eng 71:197"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-math/b-solid-mechanics-x-topology-optimization.yaml"
    },
    {
      "id": "b-soliton-x-integrable-systems",
      "title": "Solitons ↔ Integrable systems — exact N-soliton solutions via inverse scattering",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The Korteweg-de Vries equation supports N-soliton solutions that pass through each other unchanged, arising because KdV is a completely integrable Hamiltonian system with infinitely many conserved quantities; the inverse scattering transform (IST) is the nonlinear analogue of Fourier transform for t",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-soliton-x-integrable-systems"
      ],
      "communication_gap": "Solitons were discovered numerically by Zabusky & Kruskal (1965) in a plasma physics context; the inverse scattering transform was developed by Gardner, Greene, Kruskal & Miura (1967) in applied mathematics. Pure mathematicians studying completely integrable systems (Lax, Zakharov, Shabat) developed the algebraic theory in the 1970s. These three communities (plasma physics, applied mathematics, pure mathematics) rarely cited each other's work despite studying identical mathematical structures.",
      "translation_table": [
        {
          "field_a_term": "soliton (stable localised wave that preserves shape after collision)",
          "field_b_term": "eigenvalue of Lax pair Schrödinger operator (integrable systems)",
          "note": "Each soliton corresponds to a discrete eigenvalue of the associated linear scattering problem"
        },
        {
          "field_a_term": "inverse scattering transform (IST) for KdV",
          "field_b_term": "nonlinear Fourier transform decomposing initial data into solitons + radiation",
          "note": "IST exactly solves KdV initial value problem; solitons = discrete spectrum, radiation = continuous spectrum"
        },
        {
          "field_a_term": "infinitely many conservation laws of KdV (I_n = ∫ P_n(u, u_x, ...) dx)",
          "field_b_term": "action variables in completely integrable Hamiltonian system",
          "note": "Conservation laws are in involution under Poisson bracket — complete integrability by Arnold-Liouville theorem"
        },
        {
          "field_a_term": "soliton elastic collision (phase shift only, no energy exchange)",
          "field_b_term": "commuting Hamiltonian flows preserving action variables",
          "note": "Elastic collision is the physical manifestation of mathematical integrability"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.15.240",
          "note": "Zabusky & Kruskal (1965) — interaction of solitons in a collisionless plasma; PRL 15:240"
        },
        {
          "doi": "10.1103/PhysRevLett.19.1095",
          "note": "Gardner, Greene, Kruskal & Miura (1967) — method for solving KdV equation; PRL 19:1095"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-math/b-soliton-x-integrable-systems.yaml"
    },
    {
      "id": "b-spin-waves-x-magnons",
      "title": "Spin Waves x Magnons — collective excitations as quasiparticles\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Spin waves in ferromagnets (collective precession of magnetic moments) are quantized as magnons — bosonic quasiparticles with a quadratic dispersion relation ω ∝ k²; Holstein-Primakoff transformation maps interacting spins to harmonic oscillators, enabling quantum field theory methods for magnetic s",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "The quasiparticle concept (magnons) was developed by Bloch and Holstein-Primakoff in the 1930-40s using separate formalisms from quantum field theory; the unified second-quantization framework connecting spin waves to QFT methods was consolidated only through condensed matter textbooks decades later.\n",
      "translation_table": [
        {
          "field_a_term": "Spin deviation from ground state",
          "field_b_term": "Boson occupation number (Holstein-Primakoff boson)",
          "note": "Holstein-Primakoff maps spin operators to bosonic creation/annihilation operators: S⁺ ≈ √(2S) a, S⁻ ≈ √(2S) a†, Sz = S - a†a; exact for small deviations.\n"
        },
        {
          "field_a_term": "Magnon dispersion ω(k) ∝ k²",
          "field_b_term": "Quadratic band in momentum space",
          "note": "The quadratic dispersion (unlike linear phonon dispersion) arises from the broken continuous symmetry of the ferromagnet; it is protected by Goldstone's theorem for systems with spin rotational symmetry breaking.\n"
        },
        {
          "field_a_term": "Magnon-magnon interactions (four-magnon scattering)",
          "field_b_term": "Quartic term in boson Hamiltonian",
          "note": "Beyond the harmonic approximation, magnon interactions produce temperature-dependent corrections to magnetization (Bloch T^(3/2) law) calculable by many-body perturbation theory.\n"
        },
        {
          "field_a_term": "Ferromagnetic resonance frequency",
          "field_b_term": "k=0 magnon energy gap",
          "note": "Applied fields open a gap in the magnon spectrum at k=0, directly measurable by ferromagnetic resonance spectroscopy — connecting lab measurement to quasiparticle energy.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRev.58.1098",
          "note": "Holstein & Primakoff (1940) — Field dependence of the intrinsic domain magnetization of a ferromagnet; Phys Rev 58:1098"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-math/b-spin-waves-x-magnons.yaml"
    },
    {
      "id": "b-topological-defects-x-homotopy",
      "title": "Topological defects x Homotopy groups — vortices classified by pi_1\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The classification of topological defects in ordered media (vortices in superfluids, dislocations in crystals, monopoles in spin textures) is governed by the homotopy groups of the order parameter space; pi_1 classifies line defects (vortices), pi_2 classifies point defects (hedgehogs), and pi_3 cla",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Condensed matter physicists discovering vortex quantization and topological defects in the 1960s-70s were largely unaware of the existing homotopy theory in mathematics; the Kibble-Zurek mechanism and defect topology were developed independently before Mermin's 1979 review unified the framework.\n",
      "translation_table": [
        {
          "field_a_term": "vortex in superfluid (physics)",
          "field_b_term": "element of first homotopy group pi_1(S^1) = Z (mathematics)",
          "note": "A superfluid vortex carries quantized circulation; the winding number is an integer element of pi_1"
        },
        {
          "field_a_term": "magnetic monopole or hedgehog defect (physics)",
          "field_b_term": "element of pi_2(S^2) = Z (mathematics)",
          "note": "A hedgehog point defect in a 3D vector field has integer topological charge classified by pi_2"
        },
        {
          "field_a_term": "topological stability of defect (physics)",
          "field_b_term": "non-trivial element of homotopy group (mathematics)",
          "note": "A defect is topologically stable if and only if it corresponds to a non-identity element of the relevant homotopy group"
        },
        {
          "field_a_term": "defect annihilation (physics)",
          "field_b_term": "group multiplication to identity in pi_n (mathematics)",
          "note": "Two defects can annihilate if and only if their homotopy group elements multiply to the identity"
        }
      ],
      "references": [
        {
          "doi": "10.1103/RevModPhys.51.591",
          "note": "Mermin (1979) - The topological theory of defects in ordered media; Rev Mod Phys 51:591"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-math/b-topological-defects-x-homotopy.yaml"
    },
    {
      "id": "b-black-holes-information-theory",
      "title": "Bekenstein-Hawking entropy S_BH = A/4l_P² (area law) and the holographic bound connect black hole thermodynamics to information theory; the Page curve and island formula (replica wormholes) resolve Hawking's information paradox by showing entanglement entropy of radiation follows a unitary Page curve via quantum extremal surfaces.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Bekenstein (1973) proposed that a black hole of horizon area A carries entropy S_BH = kA/4l_P² (in natural units, S_BH = A/4G in Planck units). This is the maximum entropy that can be enclosed in a region of space — the holographic bound (Bousso 2002): any matter system satisfies S ≤ A/4l_P² where A",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-island-formula-entanglement-wedge-quantum-error-correction"
      ],
      "communication_gap": "The information paradox debate spanned quantum gravity (general relativity + QFT), quantum information theory, and string theory — three communities with distinct mathematical formalisms and publication venues (Phys Rev D, JHEP, Commun Math Phys, and arXiv:hep-th). The 2019-2020 resolution via replica wormholes appeared first on arXiv in a burst of simultaneous papers by competing groups. Information theorists working on quantum error correction (e.g., Preskill's group at Caltech) and relativists studying black holes rarely read the same journals, despite the Hayden-Preskill (2007) bridge paper showing black holes are fast scramblers.\n",
      "translation_table": [
        {
          "field_a_term": "Bekenstein-Hawking entropy S_BH = A/4G",
          "field_b_term": "maximum information capacity (bits) stored on a surface of area A"
        },
        {
          "field_a_term": "Hawking temperature T_H = ℏc³/(8πGMk_B)",
          "field_b_term": "thermal noise temperature of black hole as information channel"
        },
        {
          "field_a_term": "Page curve (entanglement entropy S(R) vs. evaporation time)",
          "field_b_term": "unitary information recovery curve of any quantum channel"
        },
        {
          "field_a_term": "island formula quantum extremal surface",
          "field_b_term": "quantum error correction recovery region (entanglement wedge)"
        },
        {
          "field_a_term": "replica wormhole saddle point in gravitational path integral",
          "field_b_term": "off-diagonal terms in the replica trick for Rényi entropy calculation"
        },
        {
          "field_a_term": "holographic bound S ≤ A/4l_P²",
          "field_b_term": "channel capacity bound: information in a region bounded by its surface area"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevD.7.2333",
          "note": "Bekenstein (1973) Phys Rev D 7:2333 — black holes and entropy"
        },
        {
          "doi": "10.1007/BF02345020",
          "note": "Hawking (1975) Commun Math Phys 43:199 — particle creation by black holes"
        },
        {
          "doi": "10.1103/PhysRevLett.71.3743",
          "note": "Page (1993) Phys Rev Lett 71:3743 — information in black hole radiation"
        },
        {
          "doi": "10.1007/JHEP09(2020)002",
          "note": "Penington (2020) JHEP 09:002 — entanglement wedge reconstruction and the information paradox"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-mathematics/b-black-holes-information-theory.yaml"
    },
    {
      "id": "b-fluid-instabilities-bifurcation",
      "title": "Fluid instabilities — Rayleigh-Bénard convection, Kelvin-Helmholtz, Plateau-Rayleigh — are physical realizations of mathematical bifurcations: the transition from conductive to convective flow is a pitchfork bifurcation at Ra_c = 1708, and Lorenz's three-mode truncation of the Bénard equations produced the first concrete demonstration of deterministic chaos.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Rayleigh-Bénard convection: a fluid heated from below and cooled from above undergoes a transition from pure conduction to convective rolls when the Rayleigh number Ra = g*alpha*DeltaT*L³/(nu*kappa) exceeds the critical value Ra_c = 1708 (from linear stability analysis). This transition is a supercr",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-rayleigh-benard-turbulence-bifurcation-cascade"
      ],
      "communication_gap": "Fluid dynamicists publishing in Journal of Fluid Mechanics and Physics of Fluids largely overlap with mathematicians working in nonlinear dynamics, but the connection is not universal. Applied mathematicians familiar with bifurcation theory sometimes lack physical intuition for fluid instabilities, while experimentalists focus on pattern selection without engaging the underlying mathematical framework. Chaos theory (Strogatz 1994 textbook) has helped bridge this gap for students.\n",
      "translation_table": [
        {
          "field_a_term": "Rayleigh number Ra_c = 1708",
          "field_b_term": "bifurcation parameter value at pitchfork bifurcation point",
          "note": "Ra plays the role of the control parameter mu in the normal form X' = mu*X - X³"
        },
        {
          "field_a_term": "convective rolls (stationary pattern)",
          "field_b_term": "stable fixed points emerging at supercritical pitchfork bifurcation",
          "note": "Amplitude of rolls scales as sqrt(Ra - Ra_c) — classic bifurcation scaling"
        },
        {
          "field_a_term": "oscillating convection onset",
          "field_b_term": "Hopf bifurcation — limit cycle emerges from fixed point",
          "note": "Period of oscillation is set by the imaginary part of eigenvalues at criticality"
        },
        {
          "field_a_term": "Lorenz strange attractor",
          "field_b_term": "chaotic attractor with positive Lyapunov exponent, fractal dimension",
          "note": "Landmark demonstration that deterministic ODEs can produce unpredictable trajectories; a computer-assisted rigorous proof of the Lorenz attractor came later (Tucker 2002)"
        },
        {
          "field_a_term": "Plateau-Rayleigh jet breakup at lambda > 2*pi*R",
          "field_b_term": "linear instability threshold — fastest-growing mode sets droplet size",
          "note": "Droplet spacing predicted by maximizing growth rate sigma(k)"
        }
      ],
      "references": [
        {
          "doi": "10.1080/14786441608635602",
          "note": "Rayleigh (1916) Philos Mag 32:529 — linear stability analysis of convection"
        },
        {
          "doi": "10.1175/1520-0469(1963)020<0130:DNF>2.0.CO;2",
          "note": "Lorenz (1963) J Atmos Sci 20:130 — deterministic nonperiodic flow, strange attractor"
        },
        {
          "note": "Drazin & Reid (2004) Hydrodynamic Stability. Cambridge University Press"
        },
        {
          "note": "Strogatz (1994) Nonlinear Dynamics and Chaos. Perseus Books"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-mathematics/b-fluid-instabilities-bifurcation.yaml"
    },
    {
      "id": "b-noether-theorem-conservation-laws",
      "title": "Every differentiable symmetry of the action of a physical system corresponds to a conservation law — Noether's theorem is the deepest known connection between the geometry of symmetry groups and the conservation laws of physics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Noether's first theorem (1915, published 1918) establishes a bijection between continuous symmetries of the action S = ∫ L dt and conserved quantities (Noether currents/charges). This is not an analogy — it is a theorem with exact mathematical proof.\nThe canonical examples: time translation symmetry",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-noether-symmetry-breaking-new-physics"
      ],
      "communication_gap": "Noether's theorem is taught in advanced theoretical physics but rarely in mathematics curricula, despite being a mathematical theorem. Biologists and complex systems scientists rarely encounter it. The abstract algebraic formulation (Lie algebras, co-adjoint orbits) is inaccessible without significant mathematical training, limiting cross-field application.\n",
      "translation_table": [
        {
          "field_a_term": "continuous symmetry of the action (Lie group G)",
          "field_b_term": "conserved Noether charge Q",
          "note": "One-to-one: each generator of G gives one conserved current"
        },
        {
          "field_a_term": "time translation symmetry (t → t + ε)",
          "field_b_term": "conservation of energy (Hamiltonian H = const)",
          "note": "The most familiar instance — energy conservation from time-homogeneity"
        },
        {
          "field_a_term": "spatial translation symmetry (x → x + a)",
          "field_b_term": "conservation of linear momentum p",
          "note": "Momentum conservation from spatial homogeneity of physical laws"
        },
        {
          "field_a_term": "rotational symmetry SO(3) (x → Rx)",
          "field_b_term": "conservation of angular momentum L = r × p",
          "note": "Angular momentum from isotropy of space"
        },
        {
          "field_a_term": "U(1) gauge symmetry ψ → e^{iα} ψ",
          "field_b_term": "conservation of electric charge Q",
          "note": "The gauge symmetry of electromagnetism gives charge conservation"
        }
      ],
      "references": [
        {
          "note": "Noether (1918) Invariante Variationsprobleme. Nachr Akad Wiss Göttingen 235-257"
        },
        {
          "note": "Kosmann-Schwarzbach (2011) The Noether Theorems: Invariance and Conservation Laws. Springer"
        },
        {
          "note": "Weyl (1929) Elektron und Gravitation. Z Phys 56:330-352"
        },
        {
          "note": "Weinberg (1995) The Quantum Theory of Fields, Vol. 1. Cambridge University Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-mathematics/b-noether-theorem-conservation-laws.yaml"
    },
    {
      "id": "b-radiocarbon-dating-exponential-decay",
      "title": "Radiocarbon dating applies the first-order decay law N(t) = N0 * exp(-lambda * t) with lambda = ln2 / 5,730 yr to determine the age of organic material, with Bayesian calibration correcting for past atmospheric C-14 variations using dendrochronology",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Carbon-14 produced by cosmic ray spallation of N-14 enters living organisms at atmospheric concentration N0; after death, N(t) = N0 * exp(-t * ln2 / 5730) with half-life T_1/2 = 5,730 yr (±40 yr); measured 14C/12C ratio gives raw radiocarbon age, corrected to calendar age using the IntCal calibratio",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Archaeologists interpret radiocarbon dates using calibration software (OxCal, CALIB) without understanding the Bayesian mathematics; nuclear physicists understand isotope decay but rarely work with calibration curve uncertainties; the connection between measurement physics and archaeological chronology requires expertise in both fields.",
      "translation_table": [
        {
          "field_a_term": "14C/12C ratio in archaeological sample",
          "field_b_term": "N(t)/N0 in exponential decay: N(t) = N0 exp(-lambda t)",
          "note": "Ratio measured by AMS (accelerator mass spectrometry); precision +/- 15-30 years for samples < 10,000 yr old"
        },
        {
          "field_a_term": "radiocarbon age BP (before present = before 1950)",
          "field_b_term": "t = ln(N0/N(t)) / lambda from decay equation",
          "note": "Libby half-life (5,568 yr) used for historical reasons; corrected in Bayesian calibration"
        },
        {
          "field_a_term": "IntCal calibration curve",
          "field_b_term": "Bayesian prior: P(calendar age | radiocarbon age)",
          "note": "Wiggles in calibration curve (from solar variation, ocean ventilation) cause age uncertainty plateaus"
        },
        {
          "field_a_term": "stratigraphic prior in Bayesian chronological modeling",
          "field_b_term": "order constraints on calibrated ages in Bayesian posterior",
          "note": "OxCal software implements Bayesian calibration with stratigraphic, sequence, and phase constraints"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.109.2827.227",
          "note": "Libby et al. (1949) Science - radiocarbon dating technique original publication"
        },
        {
          "doi": "10.1017/RDC.2020.41",
          "note": "Reimer et al. (2020) Radiocarbon - IntCal20 calibration curve"
        },
        {
          "doi": "10.1017/S0033822200033865",
          "note": "Bronk Ramsey (2009) Radiocarbon - Bayesian analysis of radiocarbon dates with OxCal"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-mathematics/b-radiocarbon-dating-exponential-decay.yaml"
    },
    {
      "id": "b-random-matrix-quantum-chaos",
      "title": "The Bohigas-Giannoni-Schmit conjecture (1984) states that energy level statistics of quantum systems with chaotic classical dynamics follow Gaussian Orthogonal Ensemble (GOE) random matrix statistics, proved for specific systems via Sieber-Richter pairs of correlated periodic orbits, unifying quantum chaos, nuclear physics, and the Riemann zeta function zeros.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Bohigas-Giannoni-Schmit (BGS) conjecture (1984): the nearest-neighbor level spacing distribution of quantized chaotic Hamiltonians follows the Gaussian Orthogonal Ensemble (GOE). The Wigner surmise P(s) = (πs/2)exp(-πs²/4) shows level repulsion — P(0)=0, unlike the Poisson distribution P(s)=exp(",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-sieber-richter-pairs-bgs-proof"
      ],
      "communication_gap": "Random matrix theory originated in nuclear physics (Wigner, Dyson, Mehta), developed its own mathematical community (free probability, Wishart matrices in statistics), and was independently adopted in quantum chaos without full cross-citation. Number theorists studying Riemann zeros are rarely in communication with quantum chaos physicists, despite the GUE connection. The SYK/black-hole connection is very recent and still being absorbed by condensed matter physicists.\n",
      "translation_table": [
        {
          "field_a_term": "quantum energy levels of a chaotic Hamiltonian",
          "field_b_term": "eigenvalues of a random matrix from GOE",
          "note": "BGS conjecture — the correspondence holds for all locally chaotic classical limits"
        },
        {
          "field_a_term": "Wigner surmise level repulsion P(0)=0",
          "field_b_term": "vanishing density of states at zero level spacing in GOE",
          "note": "reflects anti-bunching analogous to fermion statistics in the level gas"
        },
        {
          "field_a_term": "Sieber-Richter correlated orbit pairs",
          "field_b_term": "leading non-diagonal contribution in the periodic orbit sum",
          "note": "pairs have same action and nearly identical trajectories linked by time-reversal"
        },
        {
          "field_a_term": "Poisson statistics of integrable system levels",
          "field_b_term": "uncorrelated eigenvalues (GOE/GUE level repulsion requires chaos)",
          "note": "absence of avoided crossings in integrable systems — levels can cross freely"
        },
        {
          "field_a_term": "SYK model all-to-all random interactions",
          "field_b_term": "random matrix Hamiltonian at large N",
          "note": "SYK saturates the MSS chaos bound and is holographically dual to JT gravity"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.52.1",
          "note": "Bohigas et al. (1984) Characterization of chaotic quantum spectra and universality of level fluctuation laws. Phys Rev Lett 52:1–4"
        },
        {
          "doi": "10.1103/PhysRevLett.93.014103",
          "note": "Müller et al. (2004) Semiclassical foundation of universality in quantum chaos. Phys Rev Lett 93:014103"
        },
        {
          "doi": "10.1016/S0370-1573(97)00088-4",
          "note": "Guhr et al. (1998) Random-matrix theories in quantum physics: common concepts. Phys Rep 299:189–425"
        },
        {
          "note": "Mehta (2004) Random Matrices, 3rd ed; Academic Press, New York"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-mathematics/b-random-matrix-quantum-chaos.yaml"
    },
    {
      "id": "b-renormalization-group-fixed-points",
      "title": "Wilson's renormalization group maps RG flow in coupling-constant space onto a dynamical system whose fixed points — classified by their eigenvalue spectrum — determine universality classes of critical phenomena, making the mathematics of continuous-group flows and fixed-point stability the exact language for the physics of second-order phase transitions independent of microscopic details.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The renormalization group (Wilson 1971) provides the deepest explanation of universality: why systems as microscopically different as magnets, binary fluids, and liquid-gas transitions near their critical points share identical critical exponents. The RG procedure integrates out short-wavelength (hi",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-rg-epsilon-expansion-convergence-nonperturbative-corrections"
      ],
      "communication_gap": "The RG is published primarily in physics journals (Phys Rev B, Rev Mod Phys, Nucl Phys B) using physicists' notation (path integrals, Feynman diagrams) that is opaque to most mathematicians. The rigorous mathematical treatment (Balaban, Gallavotti, Bricmont) uses constructive field theory methods that are unknown to most physicists. The dynamical systems framing of RG fixed points — central to the physics community since Wilson — is rarely stated explicitly in mathematics textbooks on dynamical systems, despite being a natural application. Funding structures separate condensed matter physics from pure mathematics, limiting cross-pollination.\n",
      "translation_table": [
        {
          "field_a_term": "coupling constant flow g(L) under block-spin transformation",
          "field_b_term": "trajectory in a dynamical system ẋ = f(x)",
          "note": "The beta function β(g) plays the role of the vector field f(x)"
        },
        {
          "field_a_term": "RG fixed point g* where β(g*) = 0",
          "field_b_term": "equilibrium point of a dynamical system"
        },
        {
          "field_a_term": "RG eigenvalue y_k (eigenvalue of stability matrix at g*)",
          "field_b_term": "Lyapunov exponent classifying stability of a fixed point"
        },
        {
          "field_a_term": "relevant operator (y_k > 0) — grows under RG iteration",
          "field_b_term": "unstable direction in phase space (positive Lyapunov exponent)"
        },
        {
          "field_a_term": "irrelevant operator (y_k < 0) — shrinks under RG iteration",
          "field_b_term": "stable direction in phase space (negative Lyapunov exponent)"
        },
        {
          "field_a_term": "universality class (same fixed point, same critical exponents)",
          "field_b_term": "basin of attraction of a fixed point in parameter space"
        },
        {
          "field_a_term": "upper critical dimension d_c (Gaussian FP becomes stable)",
          "field_b_term": "bifurcation point (fixed-point stability changes as parameter varies)"
        },
        {
          "field_a_term": "ε-expansion (perturbation theory around d=4)",
          "field_b_term": "center-manifold reduction near a bifurcation"
        }
      ],
      "references": [
        {
          "doi": "10.1016/0370-1573(74)90023-4",
          "note": "Wilson & Kogut (1974) Phys Rep 12:75 — the renormalization group and the ε expansion (comprehensive review)"
        },
        {
          "doi": "10.1103/PhysRevB.4.3174",
          "note": "Wilson (1971) Phys Rev B 4:3174 — renormalization group and critical phenomena I: block spin formulation"
        },
        {
          "doi": "10.1103/RevModPhys.46.597",
          "note": "Fisher (1974) Rev Mod Phys 46:597 — the renormalization group in the theory of critical behavior"
        },
        {
          "note": "Goldenfeld (1992) Lectures on Phase Transitions and the Renormalization Group — Addison-Wesley (textbook treatment)"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-mathematics/b-renormalization-group-fixed-points.yaml"
    },
    {
      "id": "b-renormalization-wilson-wavelets-x-wavelet-shrinkage-denoising",
      "title": "Wilson’s renormalization group coarse-grains microscopic fluctuations into fixed-point long-distance physics — Mallat’s multiresolution analysis and orthogonal wavelets implement dyadic scale separation analogous to integrating out shells in momentum space — soft-threshold wavelet denoising discards small coefficients interpreted as “irrelevant” detail at fine scales, mirroring RG irrelevant directions without repeating the established RG×deep-learning bridge elsewhere in the catalog.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Wavelet bases supply a mathematically controlled hierarchical decomposition of L² signals; Wilson/Kadanoff coarse-graining removes degrees of freedom whose statistical influence shrinks under rescaling — thresholding Gaussian noise in an orthogonal wavelet domain (Donoho–Johnstone) realizes an opera",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-wavelet-subband-energy-tracks-rg-relevant-flux"
      ],
      "communication_gap": "Mathematical statistics treats thresholding as minimax decision theory; Wilsonian RG grew from quantum/statistical field theory — wavelets became the lingua franca linking harmonic analysts and signal processors but rarely appear in RG textbooks alongside diagrammatic formalism.\n",
      "translation_table": [
        {
          "field_a_term": "Integrating out high-k modes in RG transformation",
          "field_b_term": "Orthogonal wavelet decomposition high-frequency subbands",
          "note": "Both organize fluctuation degrees of freedom by spatial scale."
        },
        {
          "field_a_term": "Irrelevant operators under RG flow",
          "field_b_term": "Wavelet coefficients shrunk toward zero by universal threshold rules",
          "note": "Information discarded when statistically negligible at macroscopic observables."
        },
        {
          "field_a_term": "Wilson fixed point / universality class",
          "field_b_term": "Minimax optimal estimator class under Besov smoothness priors",
          "note": "Optimality statements live on different mathematical foundations but share multiscale vocabulary."
        }
      ],
      "references": [
        {
          "doi": "10.1016/0370-1573(74)90023-4",
          "note": "Wilson & Kogut (1974) — RG and ε-expansion foundations"
        },
        {
          "doi": "10.1109/34.192463",
          "note": "Mallat (1989) — multiresolution approximation and wavelet orthonormal bases"
        },
        {
          "doi": "10.1111/j.2517-6161.1995.tb02031.x",
          "note": "Donoho & Johnstone (1995) — wavelet shrinkage minimax estimation via thresholding"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-mathematics/b-renormalization-wilson-wavelets-x-wavelet-shrinkage-denoising.yaml"
    },
    {
      "id": "b-statistical-mechanics-information-theory",
      "title": "Statistical Mechanics and Information Theory — Boltzmann entropy and Shannon entropy are formally identical; Jaynes maximum entropy derives equilibrium, Landauer links erasure to thermodynamics",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Boltzmann entropy S = k_B ln W and Shannon entropy H = −Σpᵢ log pᵢ are mathematically identical after substituting k_B and adjusting the logarithm base. Boltzmann counts microstates W consistent with a macrostate; Shannon measures uncertainty over a probability distribution — both quantify missi",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Statistical mechanics and information theory developed in parallel communities (Boltzmann/Gibbs in physics; Shannon/Wiener in electrical engineering) using similar mathematical structures without substantial cross-citation until Jaynes (1957) and later the Maximum Entropy formalism. Many physicists remain unfamiliar with the full scope of information-theoretic interpretations of thermodynamics, and many information theorists are unaware of how closely their machinery mirrors statistical mechanics. The connection to Bayesian inference (thermodynamic free energy = Bayesian evidence) remains underappreciated outside specialist communities.\n",
      "translation_table": [
        {
          "field_a_term": "Boltzmann entropy S = k_B ln W",
          "field_b_term": "Shannon entropy H = −Σpᵢ log pᵢ (with k_B replacing log base)",
          "note": "Identical expressions; W = 1/p for uniform distributions gives S = k_B·H·ln2 exactly"
        },
        {
          "field_a_term": "maximum entropy principle (canonical ensemble)",
          "field_b_term": "least-biased probability assignment consistent with known constraints",
          "note": "Jaynes (1957): thermodynamic equilibrium is the maximum-ignorance distribution given energy conservation"
        },
        {
          "field_a_term": "partition function Z = Σ e^{−βEᵢ}",
          "field_b_term": "moment-generating function of the energy distribution",
          "note": "All thermodynamic quantities follow from derivatives of ln Z; analogous to the cumulant-generating function in probability theory"
        },
        {
          "field_a_term": "mutual information I(X;Y)",
          "field_b_term": "thermodynamic free energy gain from correlations",
          "note": "I(X;Y) = F_correlated − F_uncorrelated in units of k_BT; measures usefulness of correlation for prediction"
        },
        {
          "field_a_term": "fluctuation-dissipation theorem",
          "field_b_term": "linear response from equilibrium correlations",
          "note": "Dissipation (viscosity, conductivity) = equilibrium fluctuations (noise); no additional information needed"
        },
        {
          "field_a_term": "Landauer limit k_BT ln 2 per bit erased",
          "field_b_term": "minimum heat dissipated per irreversible logical operation",
          "note": "Modern CPUs dissipate ~10⁶ × Landauer limit per operation; near-Landauer operation requires reversible logic gates"
        }
      ],
      "references": [
        {
          "note": "Boltzmann (1877) Wien Ber 76:373 — entropy and counting microstates"
        },
        {
          "doi": "10.1002/j.1538-7305.1948.tb01338.x",
          "note": "Shannon (1948) Bell Syst Tech J 27:379 — mathematical theory of communication"
        },
        {
          "doi": "10.1103/PhysRev.106.620",
          "note": "Jaynes (1957) Phys Rev 106:620 — information theory and statistical mechanics"
        },
        {
          "doi": "10.1147/rd.53.0183",
          "note": "Landauer (1961) IBM J Res Dev 5:183 — irreversibility and heat generation in computing"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-mathematics/b-statistical-mechanics-information-theory.yaml"
    },
    {
      "id": "b-topology-condensed-matter-tqft",
      "title": "Topological quantum field theory classifies phases of matter by topological invariants rather than order parameters, extending Landau's paradigm and explaining the quantised conductance of the quantum Hall effect as a Chern number.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Witten's topological quantum field theories (TQFTs, 1988) classify physical systems by topological invariants that are robust to any smooth deformation — they cannot change without a phase transition. The quantum Hall conductance σ_xy = ne²/h is quantised because n is the Chern number: an integer to",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-chern-number-tis-robustness"
      ],
      "communication_gap": "Topology and K-theory are taught in pure mathematics programs; condensed matter physics programs rarely teach differential geometry or algebraic topology at the depth needed to derive the TKNN invariant. The connection was made by physicists (Thouless, Witten) working at the interface, and Nobel recognition (2016) has increased awareness but graduate training remains siloed.\n",
      "translation_table": [
        {
          "field_a_term": "Chern number (first Chern class of Berry bundle)",
          "field_b_term": "Hall conductance quantum n (integer)",
          "note": "n = (1/2π)∮ Ω(k) d²k; topological invariant; robust to disorder"
        },
        {
          "field_a_term": "topological phase transition",
          "field_b_term": "gap closing in bulk band structure",
          "note": "Topological invariant can only change when the bulk gap closes; protected surface states"
        },
        {
          "field_a_term": "K-theory classification of vector bundles",
          "field_b_term": "periodic table of topological insulators/superconductors",
          "note": "10 symmetry classes × 3 spatial dimensions = 30 possible topological phases"
        },
        {
          "field_a_term": "Chern-Simons TQFT",
          "field_b_term": "fractional quantum Hall state (ν=1/3 Laughlin state)",
          "note": "Effective low-energy theory; quasi-particles are anyons with fractional statistics"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF01223371",
          "note": "Witten, E. (1988). Topological quantum field theory. Commun Math Phys 117:353."
        },
        {
          "doi": "10.1103/PhysRevLett.49.405",
          "note": "Thouless, D.J. et al. (1982). Quantised Hall conductance in a two-dimensional periodic potential. Phys Rev Lett 49:405."
        },
        {
          "doi": "10.1063/1.3149495",
          "note": "Kitaev, A. (2009). Periodic table for topological insulators and superconductors. AIP Conf Proc 1134:22."
        },
        {
          "doi": "10.1103/RevModPhys.82.3045",
          "note": "Hasan, M.Z. & Kane, C.L. (2010). Colloquium: topological insulators. Rev Mod Phys 82:3045."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-mathematics/b-topology-condensed-matter-tqft.yaml"
    },
    {
      "id": "b-topology-knot-invariants-physics",
      "title": "Wilson loops in Chern-Simons gauge theory equal Jones polynomial knot invariants (Witten 1989) — the expectation value ⟨W(C)⟩ of the Wilson loop along closed curve C computes the Jones polynomial of knot C, giving a physical interpretation of purely mathematical knot invariants as partition functions of topological quantum field theories.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Witten (1989) showed that the partition function of SU(2) Chern-Simons theory on a 3-manifold M equals the Jones polynomial V_K(q) of a knot K = C embedded in M, where q = exp(2πi/(k+2)) and k is the Chern-Simons coupling. The Wilson loop operator W_R(C) = Tr_R P exp(i∮_C A) computes the holonomy of",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-3manifold-invariants-topological-completeness"
      ],
      "communication_gap": "Knot theory developed in pure mathematics departments (Jones, Thurston, Freedman) completely unaware of gauge theory. Witten's 1989 paper bridging the fields required understanding two expert literatures. Atiyah (1990) wrote The Geometry and Physics of Knots specifically to make this connection accessible to mathematicians. Topological quantum computing groups in condensed matter physics cite knot theory papers rarely, despite computing Jones polynomials by design.\n",
      "translation_table": [
        {
          "field_a_term": "Chern-Simons action S = (k/4π)∫Tr(A∧dA + (2/3)A∧A∧A)",
          "field_b_term": "Jones polynomial V_K(t) via skein relation",
          "note": "physical coupling k corresponds to variable t = exp(2πi/(k+2)) in the polynomial"
        },
        {
          "field_a_term": "Wilson loop W_R(C) = Tr_R P exp(i∮A)",
          "field_b_term": "knot invariant associated to framing of K in representation R",
          "note": "different representations R give different colored Jones polynomials"
        },
        {
          "field_a_term": "topological quantum field theory (no local degrees of freedom)",
          "field_b_term": "topological invariant (independent of metric, depends only on topology)",
          "note": "both objects are metric-independent — the fundamental reason the connection works"
        },
        {
          "field_a_term": "anyonic braiding in quantum Hall state",
          "field_b_term": "braid group representation → quantum gate",
          "note": "topological quantum computing encodes gates as knot invariant computations"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF01217730",
          "note": "Witten (1989) — Quantum field theory and the Jones polynomial; Commun Math Phys 121:351"
        },
        {
          "doi": "10.2307/1971403",
          "note": "Jones (1985) — A polynomial invariant for knots via von Neumann algebras; Bull AMS 12:103"
        },
        {
          "note": "Adams (1994) — The Knot Book; W.H. Freeman"
        },
        {
          "note": "Atiyah (1990) — The Geometry and Physics of Knots; Cambridge University Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-mathematics/b-topology-knot-invariants-physics.yaml"
    },
    {
      "id": "b-turbulence-renormalization-group",
      "title": "Kolmogorov's 1941 scaling law for the turbulent energy spectrum E(k) ~ k^{-5/3} in the inertial range is derived from a renormalization-group (RG) fixed point of the Navier-Stokes equations in momentum space: the RG flow drives the system to a universal scaling regime independent of the large-scale energy injection mechanism.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Kolmogorov (1941) argued that in the inertial range (injection scale L >> l >> dissipation scale η), energy cascades from large to small eddies at a constant rate ε, giving E(k) ~ ε^{2/3} k^{-5/3}. Yakhot & Orszag (1986) derived this using dynamic RG (DRG) applied to the Navier-Stokes equations: int",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-navier-stokes-rg-fixed-point-intermittency-exponents"
      ],
      "communication_gap": "Fluid dynamicists and turbulence engineers use the K41 spectrum empirically and Smagorinsky eddy viscosity models without engaging with the RG derivation. Field theorists who developed DRG (Yakhot & Orszag 1986) publish in Physical Review Letters, not in the Journal of Fluid Mechanics where turbulence experimentalists publish.\n",
      "translation_table": [
        {
          "field_a_term": "Turbulent energy spectrum E(k) ~ k^{-5/3}",
          "field_b_term": "Correlation function at the RG fixed point (power-law scaling)",
          "note": "Fixed-point scaling gives E(k) ~ k^{-(d+2χ)/z} which equals k^{-5/3} for K41 exponents"
        },
        {
          "field_a_term": "Inertial-range energy cascade (constant flux ε)",
          "field_b_term": "RG fixed-point flow: marginal operator with zero beta function",
          "note": "Constant energy flux ↔ zero beta function for the dissipation coupling at the fixed point"
        },
        {
          "field_a_term": "Kolmogorov microscale η = (ν³/ε)^{1/4}",
          "field_b_term": "RG flow cutoff / ultraviolet scale below which fixed point breaks down",
          "note": "Molecular viscosity ν terminates the RG cascade at scale η"
        },
        {
          "field_a_term": "Intermittency corrections to K41 (anomalous exponents)",
          "field_b_term": "Irrelevant operators at the RG fixed point (perturbative corrections)",
          "note": "Multifractal exponents ζ_p ≠ p/3 arise from corrections to scaling at the RG fixed point"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.57.1722",
          "note": "Yakhot & Orszag (1986) PRL – renormalization group analysis of turbulence; derivation of K41 exponents"
        },
        {
          "doi": "10.1063/1.866483",
          "note": "Yakhot & Orszag (1986) J. Sci. Comput. – renormalisation group and large eddy simulation"
        },
        {
          "doi": "10.1146/annurev.fluid.30.1.275",
          "note": "Frisch (1995) – turbulence: the legacy of A.N. Kolmogorov; RG and multifractal theory"
        },
        {
          "arxiv": "nlin/0404058",
          "note": "Eyink & Sreenivasan – Onsager and the theory of hydrodynamic turbulence; RG connections"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-mathematics/b-turbulence-renormalization-group.yaml"
    },
    {
      "id": "b-zeeman-effect-symmetry-breaking-angular-momentum",
      "title": "The Zeeman effect — splitting of atomic spectral lines in a magnetic field — is the physical realization of symmetry breaking of the rotation group SO(3), connecting atomic spectroscopy to representation theory of Lie groups and the mathematics of angular momentum.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Without a magnetic field, atomic states with the same principal quantum number n and angular momentum l but different magnetic quantum number m are degenerate — they form an irreducible representation (irrep) of SO(3) of dimension 2l+1. A magnetic field B breaks SO(3) → U(1) (rotational symmetry → a",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-lie-group-representation-spectroscopic-rules"
      ],
      "communication_gap": "Atomic physicists and pure mathematicians working on Lie group representation theory share the same formalism but rarely interact; most atomic physics textbooks derive angular momentum algebra without noting its Lie-theoretic structure, leaving physicists unaware of generalization tools from pure mathematics.\n",
      "translation_table": [
        {
          "field_a_term": "magnetic quantum number m_l (atomic physics)",
          "field_b_term": "weight of SO(3) irreducible representation (mathematics)",
          "note": "m_l labels the basis states of the (2l+1)-dimensional irrep of SO(3)"
        },
        {
          "field_a_term": "degeneracy lifting in magnetic field (atomic physics)",
          "field_b_term": "symmetry breaking SO(3) → U(1) (mathematics)",
          "note": "External field selects a preferred axis, reducing the symmetry group"
        },
        {
          "field_a_term": "Landé g-factor (atomic physics)",
          "field_b_term": "Clebsch-Gordan decomposition of coupled angular momenta (mathematics)",
          "note": "g_J mixes orbital and spin angular momenta via CG coefficients for j=l⊕s"
        },
        {
          "field_a_term": "selection rules Δm = 0, ±1 (atomic physics)",
          "field_b_term": "Wigner-Eckart theorem for rank-1 tensor operator (mathematics)",
          "note": "Dipole operator is an SO(3) rank-1 tensor; WE theorem gives the selection rules exactly"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF01339938",
          "note": "Zeeman (1897) — original observation of magnetic splitting of spectral lines"
        },
        {
          "doi": "10.1007/BF02395808",
          "note": "Wigner (1931) — group theory and quantum mechanics (angular momentum irreps)"
        },
        {
          "doi": "10.1103/PhysRev.43.553",
          "note": "Condon & Shortley (1935) — theory of atomic spectra and Clebsch-Gordan coefficients"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-mathematics/b-zeeman-effect-symmetry-breaking-angular-momentum.yaml"
    },
    {
      "id": "b-zeeman-multiplet-x-rmt-level-spacing",
      "title": "Zeeman fine-structure multiplets in atoms ↔ unfolded energy-level spacing statistics in quantum chaos and random-matrix theory (atomic physics ↔ mathematical physics)\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "In complex atoms and molecules at energies where the single-particle picture mixes strongly, nearest-neighbor spacing distributions of highly excited levels often match random-matrix ensembles (GOE/GUE/GSE) after unfolding to remove smooth secular trends. Zeeman splitting lifts degeneracies and resh",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-zeeman-multiplet-spacing-shows-quantum-chaos-statistics"
      ],
      "communication_gap": "Atomic spectroscopy papers emphasize term symbols and radiative widths; RMT quantum chaos literature emphasizes universality classes and long-range correlations. Tooling (unfolding algorithms, choice of energy window) is shared in principle but not always co-taught.\n",
      "translation_table": [
        {
          "field_a_term": "Landé g-factor and magnetic quantum number m_J",
          "field_b_term": "symmetry class labels affecting RMT universality (GOE vs GUE)",
          "note": "Magnetic fields can drive GOE→GUE crossover phenomenology in suitable systems."
        },
        {
          "field_a_term": "avoided crossings in Stark/Zeeman maps",
          "field_b_term": "level repulsion in unfolded spectra",
          "note": "Shared Wigner-like small-spacing behavior in generic chaotic regimes."
        },
        {
          "field_a_term": "mean level density (slow variation)",
          "field_b_term": "unfolding procedure removing secular trend",
          "note": "Unfolding is mandatory before comparing to RMT reference curves."
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.52.1",
          "note": "Bohigas, Giannoni & Schmit (1984) — chaos and spectral fluctuation properties (RMT anchor)."
        },
        {
          "doi": "10.1103/RevModPhys.81.1629",
          "note": "Stockmann (2009) — quantum chaos and spectral statistics (Rev. Mod. Phys.)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-mathematics/b-zeeman-multiplet-x-rmt-level-spacing.yaml"
    },
    {
      "id": "b-scale-free-networks-criticality",
      "title": "Barabási-Albert preferential attachment ↔ criticality ↔ brain connectome ↔ internet topology",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Barabási & Albert (1999) showed that networks grown by preferential attachment — where new nodes connect preferentially to high-degree nodes (\"rich get richer\") — produce scale-free degree distributions P(k) ~ k^{-gamma} with gamma ≈ 3. Scale-free networks are critical in a precise sense: they sit a",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-scale-free-criticality-brain-hub-vulnerability"
      ],
      "communication_gap": "Barabási & Albert published in Science (1999) to a broad audience, but the connection to statistical physics criticality (diverging susceptibility, percolation) was made explicit only by Dorogovtsev & Mendes (2002) in physics journals. Neuroscientists studying connectome topology (Sporns, van den Heuvel) independently discovered hub structure but rarely cite the Barabási scale-free network literature. Internet engineers designing routing protocols are largely unaware of the critical-state implications of scale-free topology.\n",
      "translation_table": [
        {
          "field_a_term": "preferential attachment probability ∝ k_i",
          "field_b_term": "synaptogenesis biased toward highly-connected neurons",
          "note": "Activity-dependent synaptogenesis preferentially connects active (high-k) nodes"
        },
        {
          "field_a_term": "scale-free degree distribution P(k) ~ k^{-gamma}",
          "field_b_term": "brain hub distribution / internet router degree distribution",
          "note": "Same exponent gamma ≈ 2-3 across vastly different network types"
        },
        {
          "field_a_term": "diverging second moment (gamma ≤ 3)",
          "field_b_term": "infinite susceptibility = structural criticality",
          "note": "Network is maximally sensitive: one hub failure cascades globally"
        },
        {
          "field_a_term": "Molloy-Reed criterion for giant component",
          "field_b_term": "percolation transition in brain / internet robustness",
          "note": "Scale-free networks are robust to random failure but fragile to targeted hub removal"
        },
        {
          "field_a_term": "rich-club coefficient",
          "field_b_term": "brain rich club: hub-to-hub connections over-represented",
          "note": "A universal feature of scale-free networks found in brains and internet"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.286.5439.509",
          "note": "Barabási & Albert (1999) Science - preferential attachment and scale-free networks"
        },
        {
          "doi": "10.1103/RevModPhys.74.47",
          "note": "Albert & Barabási (2002) Rev. Mod. Phys. - comprehensive review"
        },
        {
          "doi": "10.1523/JNEUROSCI.3539-11.2011",
          "note": "van den Heuvel & Sporns (2011) J. Neurosci. - rich club in human connectome"
        },
        {
          "doi": "10.1038/nature04085",
          "note": "Gallos et al. (2007) - fractal and self-similar organization of brain networks"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/physics-networks/b-scale-free-networks-criticality.yaml"
    },
    {
      "id": "b-criticality-neuroscience",
      "title": "Brain-state transitions between avalanche-criticality and sub/super-critical regimes mirror second-order phase transitions in condensed-matter physics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Neural avalanches (cascades of activity that follow a power-law size distribution) are the biological signature of a system operating near a second-order phase transition — the same mathematical structure that describes, e.g., the Ising model at its critical temperature, ferro-to-paramagnetic transi",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-criticality-conscious-integration"
      ],
      "communication_gap": "Neuroscience PhD training rarely includes statistical mechanics beyond basic probability; condensed-matter programmes rarely include biophysics. High-dimensional electrode data and fMRI signals require preprocessing pipelines unfamiliar to physicists. Publication venues do not overlap: Physical Review Letters vs Nature Neuroscience audiences are almost non-overlapping.\n",
      "translation_table": [
        {
          "field_a_term": "critical temperature T_c",
          "field_b_term": "balanced excitation/inhibition ratio",
          "note": "Control parameter whose value determines the phase"
        },
        {
          "field_a_term": "order parameter (magnetisation)",
          "field_b_term": "mean firing rate / synchrony level",
          "note": "Observable that is zero in one phase, non-zero in another"
        },
        {
          "field_a_term": "correlation length ξ → ∞",
          "field_b_term": "long-range spatial correlations in LFP/fMRI",
          "note": "Diverging correlation length at criticality"
        },
        {
          "field_a_term": "power-law exponent (universality class)",
          "field_b_term": "neural avalanche exponent τ ≈ 1.5",
          "note": "Universal exponents independent of microscopic details"
        },
        {
          "field_a_term": "self-organised criticality",
          "field_b_term": "homeostatic synaptic plasticity",
          "note": "Mechanism by which the system tunes itself toward the critical point"
        }
      ],
      "references": [
        {
          "arxiv": "2405.07163",
          "note": "Harvested paper seeding u-brain-criticality-function"
        },
        {
          "doi": "10.1523/JNEUROSCI.23-35-11167.2003",
          "note": "Beggs & Plenz 2003 — foundational neural avalanche paper"
        },
        {
          "doi": "10.1038/nphys1803",
          "note": "Chialvo 2010 — emergent complex neural dynamics as adaptation"
        }
      ],
      "last_reviewed": "2026-05-04",
      "file": "cross-domain/physics-neuroscience/b-criticality-neuroscience.yaml"
    },
    {
      "id": "b-fluid-dynamics-glymphatic",
      "title": "Navier-Stokes fluid dynamics and Biot poroelastic theory govern cerebrospinal fluid flow through the brain's glymphatic system, where arterial pulsations drive bulk CSF clearance of amyloid-β and tau via perivascular channels lined with aquaporin-4 water channels on astrocyte endfeet.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The brain's glymphatic system is a fluid hydraulic machine governed by classical fluid mechanics. Arterial pulsations (cardiac cycle, ~1 Hz) create oscillatory pressure gradients ΔP ≈ 2–4 mmHg that drive CSF through the perivascular space (Virchow-Robin spaces, ~20 μm diameter). The Navier-Stokes eq",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-csf-pulsatile-flow-amyloid-clearance-sleep-deprivation"
      ],
      "communication_gap": "Fluid mechanics and neuroscience rarely intersect at the journal level. The glymphatic hypothesis (Nedergaard 2013) was published in Science but largely ignored by the fluid dynamics community until ~2018 when biophysicists started applying Biot theory. Neuroscientists lack training in poroelastic modeling; fluid mechanicists lack access to in-vivo brain imaging data. The two-photon microscopy and phase-contrast MRI methods are niche tools unfamiliar to most physicists.\n",
      "translation_table": [
        {
          "field_a_term": "Navier-Stokes pressure gradient ∇P",
          "field_b_term": "CSF driving force from arterial pulsation",
          "note": "Cardiac-cycle pulsatility is the primary pump; respiratory oscillations secondary"
        },
        {
          "field_a_term": "Darcy permeability k of porous medium",
          "field_b_term": "hydraulic conductance of perivascular space and parenchyma",
          "note": "AQP4 channel density is the biological knob controlling effective k"
        },
        {
          "field_a_term": "Biot poroelastic coupling coefficient",
          "field_b_term": "brain tissue viscoelasticity coupling fluid and solid deformation",
          "note": "Brain has G' ≈ 1 kPa, orders of magnitude softer than engineering materials"
        },
        {
          "field_a_term": "Reynolds number Re = ρUL/μ",
          "field_b_term": "CSF flow regime (Re << 1, purely Stokes flow)",
          "note": "Perivascular Re ~ 10⁻³ — viscous forces dominate completely; no turbulence"
        },
        {
          "field_a_term": "oscillatory boundary layer (Womersley number Wo)",
          "field_b_term": "pulsatile CSF flow profile through perivascular channels",
          "note": "Wo = R√(ω/ν) ~ 0.1 in perivascular spaces — quasi-steady approximation valid"
        }
      ],
      "references": [
        {
          "doi": "10.1126/scitranslmed.3003748",
          "note": "Iliff et al. (2012) A paravascular pathway facilitates CSF flow through the brain parenchyma. Sci Transl Med 4:147ra111"
        },
        {
          "doi": "10.1126/science.1240514",
          "note": "Nedergaard (2013) Garbage truck of the brain. Science 340:1529"
        },
        {
          "doi": "10.1038/s41467-018-07318-3",
          "note": "Mestre et al. (2018) Flow of cerebrospinal fluid is driven by arterial pulsations and is reduced in hypertension. Nat Commun 9:4878"
        },
        {
          "doi": "10.1063/1.1712886",
          "note": "Biot (1941) General theory of three-dimensional consolidation. J Appl Phys 12:155"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-neuroscience/b-fluid-dynamics-glymphatic.yaml"
    },
    {
      "id": "b-hopfield-spin-glass",
      "title": "Hopfield networks store memories as energy minima of E = -½Σ Wᵢⱼsᵢsⱼ — formally identical to the Ising spin glass Hamiltonian — and their storage capacity ~0.14N and catastrophic forgetting transition are calculated exactly by Parisi's replica method from spin glass theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Hopfield network (1982) defines an energy function for a network of N binary neurons sᵢ ∈ {-1, +1} with symmetric weights Wᵢⱼ:\n\n  E = -½ Σᵢ≠ⱼ Wᵢⱼ sᵢ sⱼ\n\nThis is formally identical to the Ising spin glass Hamiltonian used in condensed matter physics, where sᵢ are spin variables and Wᵢⱼ are random",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-dense-hopfield-transformer-attention-unified"
      ],
      "communication_gap": "Hopfield's 1982 paper was primarily read by neuroscientists and computer scientists; the spin glass physics connection became explicit only after Amit, Gutfreund & Sompolinsky (1985) applied the replica method. Statistical mechanics of neural networks became a distinct subfield (published in Physical Review), largely disconnected from both experimental neuroscience and modern machine learning until the 2020 dense Hopfield / transformer connection revived the bridge.\n",
      "translation_table": [
        {
          "field_a_term": "spin variable sᵢ ∈ {-1, +1}",
          "field_b_term": "binary neuron state (inactive / active)",
          "note": "Ising spins and McCulloch-Pitts neurons have identical mathematical representations"
        },
        {
          "field_a_term": "exchange coupling Jᵢⱼ (random, quenched)",
          "field_b_term": "synaptic weight Wᵢⱼ (set by Hebbian learning)",
          "note": "In spin glasses Jᵢⱼ is random; in Hopfield networks Wᵢⱼ is structured but mathematically equivalent"
        },
        {
          "field_a_term": "replica symmetry breaking (RSB, Parisi order parameter)",
          "field_b_term": "catastrophic forgetting transition at α > 0.14",
          "note": "RSB marks the transition from retrieval phase to spin-glass (confused) phase in the Hopfield model"
        },
        {
          "field_a_term": "free energy landscape F({m^μ})",
          "field_b_term": "basin of attraction structure in associative memory",
          "note": "Memory retrieval = finding a local minimum of the free energy; memories are metastable states"
        },
        {
          "field_a_term": "temperature T (thermal noise)",
          "field_b_term": "neural noise / stochastic firing",
          "note": "Both parametrise the width of the Boltzmann distribution over configurations"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.79.8.2554",
          "note": "Hopfield, J.J. (1982). Neural networks and physical systems with emergent collective computational abilities. PNAS 79:2554–2558."
        },
        {
          "doi": "10.1103/PhysRevA.32.1007",
          "note": "Amit, D.J., Gutfreund, H. & Sompolinsky, H. (1985). Spin-glass models of neural networks. Phys. Rev. A 32:1007."
        },
        {
          "doi": "10.1103/PhysRevLett.43.1754",
          "note": "Parisi, G. (1979). Infinite number of order parameters for spin-glasses. Phys. Rev. Lett. 43:1754."
        },
        {
          "note": "Mezard, M., Parisi, G. & Virasoro, M.A. (1987). Spin Glass Theory and Beyond. World Scientific. -- Comprehensive treatment of spin glass physics and its neural network applications"
        },
        {
          "doi": "10.48550/arXiv.2008.02217",
          "note": "Ramsauer, H. et al. (2020). Hopfield Networks is All You Need. ICLR 2021. — dense Hopfield ↔ transformer attention"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-neuroscience/b-hopfield-spin-glass.yaml"
    },
    {
      "id": "b-materials-consciousness-criticality",
      "title": "Phase transitions near the critical point in disordered materials and the neural dynamics associated with consciousness share mathematical structure through self-organised criticality",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Self-organised criticality (SOC) in neural networks, proposed as a substrate for consciousness and optimal information processing, shares its mathematical formalism with critical phenomena in disordered materials systems such as amorphous metals and spin glasses. In both cases, the system self-tunes",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-recurrent-processing-consciousness"
      ],
      "communication_gap": "Materials scientists working on disordered systems and neuroscientists studying criticality both use SOC language but attend entirely separate conferences. No active collaboration between condensed matter physics groups and neuroscience criticality groups has resulted in mutual methodological transfer.\n",
      "translation_table": [
        {
          "field_a_term": "spin glass order parameter",
          "field_b_term": "neural synchrony order parameter",
          "note": "Edwards-Anderson order parameter in spin glasses maps to the global synchronisation measure in neural field theories"
        },
        {
          "field_a_term": "avalanche size distribution in sandpile",
          "field_b_term": "neural avalanche power law",
          "note": "Both follow P(s) ~ s^-3/2 consistent with mean-field branching process criticality"
        },
        {
          "field_a_term": "Griffiths phase in disordered magnets",
          "field_b_term": "Griffiths phase in heterogeneous neural networks",
          "note": "Network heterogeneity extends the critical region in both systems, potentially explaining robustness of neural criticality"
        }
      ],
      "references": [],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/physics-neuroscience/b-materials-consciousness-criticality.yaml"
    },
    {
      "id": "b-poisson-counting-process-x-decay-spike-train-likelihood",
      "title": "Poisson counting-process models connect radioactive decay event counts and neural spike-train likelihoods: independent rare events produce exponential waiting times and count variance equal to the mean, while deviations expose refractory periods, bursting, or nonstationary rates.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The common object is the point process likelihood, not a claim that nuclei and neurons share mechanisms. Radioactive decay offers the memoryless baseline; neural spike trains use the same null model before testing refractory, history-dependent, or stimulus-driven departures.\n",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-time-rescaled-residuals-separate-poisson-from-bursty-counting-systems"
      ],
      "communication_gap": "Physics teaching treats Poisson processes as decay examples, while computational neuroscience uses point-process GLMs with little reference back to radioactive-decay null models.\n",
      "translation_table": [
        {
          "field_a_term": "decay rate lambda",
          "field_b_term": "conditional intensity lambda(t | history)",
          "note": "Both set expected event counts per time."
        },
        {
          "field_a_term": "exponential waiting-time distribution",
          "field_b_term": "inter-spike interval null model",
          "note": "Memorylessness is the baseline to falsify."
        },
        {
          "field_a_term": "overdispersion versus Poisson counts",
          "field_b_term": "burstiness or rate heterogeneity diagnostic",
          "note": "Fano-factor deviations need mechanism-specific follow-up."
        }
      ],
      "references": [
        {
          "doi": "10.1038/nn0504-456",
          "note": "Brown et al. review point-process approaches for neural spike trains."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-neuroscience/b-poisson-counting-process-x-decay-spike-train-likelihood.yaml"
    },
    {
      "id": "b-quantum-biology-neural-computation",
      "title": "Three experimentally established quantum biological phenomena — photosynthetic exciton coherence, radical-pair magnetoreception in cryptochrome, and enzyme quantum tunneling — raise the contested question of whether quantum coherence plays a computational role in neural microtubules (Penrose-Hameroff Orch-OR), pitting quantum physics against decoherence timescale arguments in neuroscience.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "contested",
      "bridge_claim": "Three quantum biological phenomena are now experimentally established at physiological temperatures:\n(1) Photosynthetic quantum coherence: Fleming and Engel et al. (2007) observed quantum beats in 2D electronic spectroscopy of the Fenna-Matthews- Olson (FMO) light-harvesting complex at 277 K. Excito",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-orch-or-quantum-consciousness-decoherence-timescale-refutes"
      ],
      "communication_gap": "Quantum physicists studying decoherence in biological systems (publishing in Physical Review Letters, Nature Physics) use terminology and methods (Lindblad master equations, two-dimensional electronic spectroscopy) unfamiliar to neuroscientists. Neuroscientists studying microtubules focus on cytoskeletal dynamics, mitosis, and axonal transport — rarely cite quantum physics. The Penrose-Hameroff hypothesis is widely discussed in consciousness studies and philosophy but dismissed by most neuroscientists and quantum physicists as lacking experimental support. The genuine quantum biology phenomena (photosynthesis, magnetoreception) are rarely connected to neuroscience in teaching or review articles.\n",
      "translation_table": [
        {
          "field_a_term": "Quantum coherence lifetime (fs timescale, FMO complex)",
          "field_b_term": "Functionally relevant quantum dynamics window for neural computation",
          "note": "Photosynthesis demonstrates quantum coherence useful at 10⁻¹³ s; neural needs 10⁻³ s"
        },
        {
          "field_a_term": "Singlet-triplet mixing by magnetic field (radical pair)",
          "field_b_term": "Magnetic sense — compass direction encoded in quantum spin chemistry",
          "note": "Bridge from quantum physics to animal behaviour via molecular mechanism"
        },
        {
          "field_a_term": "Objective reduction (OR) — quantum gravity collapse (Penrose)",
          "field_b_term": "Moment of conscious experience (Hameroff)",
          "note": "Orch-OR hypothesis — contested, no experimental confirmation"
        },
        {
          "field_a_term": "Decoherence time τ_D ~ ℏ/kT × (mass/m_Planck)^{-1} (Tegmark)",
          "field_b_term": "Maximum duration of quantum coherence in warm biological tissue",
          "note": "Tegmark estimates τ_D ~ 10⁻¹³ s for tubulin — argues against Orch-OR"
        },
        {
          "field_a_term": "Quantum tunneling rate (enzyme kinetics)",
          "field_b_term": "Rate enhancement beyond classical Arrhenius prediction",
          "note": "Tunneling increases enzyme rates by factors of 10³-10⁷ over classical predictions"
        },
        {
          "field_a_term": "Quantum walk (delocalized exciton over multiple sites)",
          "field_b_term": "Efficient energy transfer by simultaneous pathway sampling",
          "note": "Classical random walk is less efficient than quantum walk for energy transport"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature05678",
          "note": "Engel et al. (2007) Evidence for wavelike energy transfer through quantum coherence in photosynthetic systems, Nature 446:782 — first observation of quantum beats in FMO at 277 K\n"
        },
        {
          "doi": "10.1016/S0006-3495(00)76371-0",
          "note": "Ritz, Adem & Schulten (2000) A model for photoreceptor-based magnetoreception in birds, Biophys J 78:707 — radical pair magnetoreception model\n"
        },
        {
          "doi": "10.1016/j.plrev.2013.08.002",
          "note": "Hameroff & Penrose (2014) Consciousness in the universe: a review of the Orch-OR theory, Phys Life Rev 11:39 — comprehensive Orch-OR review\n"
        },
        {
          "doi": "10.1038/nphys2474",
          "note": "Lambert et al. (2013) Quantum biology, Nat Phys 9:10 — review of established quantum biology phenomena and open questions including neural function\n"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-neuroscience/b-quantum-biology-neural-computation.yaml"
    },
    {
      "id": "b-quantum-zeno-x-measurement",
      "title": "The quantum Zeno effect — frequent projective measurement slowing coherent evolution — offers a rigorous mathematical template for how repeated observation or interruption can stabilize internal dynamics in perception and cognition, without assuming literal quantum coherence in neural tissue.",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Quantum Zeno dynamics suppress transitions when a system is interrogated frequently enough that short-time survival amplitudes dominate; mathematically this is tied to products of projections interleaved with unitary steps. Cognitive tasks with repeated self-report, attentional sampling, or interrup",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-quantum-zeno-like-slowing-in-attention-networks"
      ],
      "communication_gap": "Quantum optics and ion-trap communities publish precise Zeno scalings, while cognitive science more often uses informal ‘interruption’ language without linking to projection maps or short-time expansions that clarify when stabilization should occur.",
      "translation_table": [
        {
          "field_a_term": "projection operators at each measurement time",
          "field_b_term": "categorical readouts of an internal decision or memory state"
        },
        {
          "field_a_term": "survival probability nonlinearity at short times",
          "field_b_term": "empirical switch hazard as a function of probe cadence"
        },
        {
          "field_a_term": "measurement strength / coupling to meter",
          "field_b_term": "task demands that force explicit state reporting"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevA.41.2295",
          "note": "Itano et al. (1990) — experimental quantum Zeno effect, Phys. Rev. A 41:2295"
        },
        {
          "doi": "10.1103/PhysRevLett.89.080401",
          "note": "Facchi & Pascazio (2002) — quantum Zeno subspaces / Zeno dynamics framework"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-neuroscience/b-quantum-zeno-x-measurement.yaml"
    },
    {
      "id": "b-spin-waves-neural-synchronization",
      "title": "Magnon dispersion in ferromagnets is formally identical to phase-oscillation band structure in coupled neural networks (Kuramoto model)",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Spin waves (magnons) in ferromagnets propagate collective oscillations of magnetic moment orientation with a dispersion relation ω(k) that mirrors the band structure of phase-oscillation modes in coupled neural oscillator networks described by the Kuramoto model. In both systems, long-range synchron",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-kuramoto-magnon-synchronization-threshold-equivalence"
      ],
      "communication_gap": "Condensed matter physicists and computational neuroscientists rarely co-publish; terminology (magnon vs. oscillator mode, exchange coupling vs. synaptic weight) conceals the mathematical identity. Spintronics and neural computation are funded through separate agencies.\n",
      "translation_table": [
        {
          "field_a_term": "magnon (spin-wave quantum)",
          "field_b_term": "phase oscillation mode",
          "note": "Both are collective excitations of an ordered state propagating on a lattice/network"
        },
        {
          "field_a_term": "exchange coupling J",
          "field_b_term": "synaptic coupling strength K",
          "note": "Critical coupling threshold for synchronization is J_c or K_c in respective models"
        },
        {
          "field_a_term": "ferromagnetic order parameter (magnetisation)",
          "field_b_term": "Kuramoto order parameter r = |N^{-1} Σ e^{iθ_j}|",
          "note": "Both measure global coherence; both undergo continuous phase transitions at threshold coupling"
        },
        {
          "field_a_term": "spin-wave dispersion ω(k) = Dk²",
          "field_b_term": "neural oscillation band structure",
          "note": "Both describe how frequency scales with wavenumber/mode index"
        }
      ],
      "references": [
        {
          "note": "Bloch (1930) — spin-wave theory in ferromagnets",
          "doi": "10.1007/BF01339661"
        },
        {
          "note": "Kuramoto (1984) Chemical Oscillations, Waves and Turbulence — coupled oscillator model"
        },
        {
          "note": "Strogatz (2000) — Kuramoto model review, synchronization phase transition",
          "doi": "10.1016/S0167-2789(00)00094-4"
        },
        {
          "note": "Néel (1948) — antiferromagnetic spin waves"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-neuroscience/b-spin-waves-neural-synchronization.yaml"
    },
    {
      "id": "b-stochastic-resonance",
      "title": "Stochastic resonance — the counterintuitive enhancement of weak-signal detection by adding noise — is a universal nonlinear phenomenon observed in physical bistable systems, hair-cell mechanoreceptors, cricket cercal systems, and human tactile perception, with optimal noise amplitude predicted by the same signal-to-noise ratio analysis in all cases.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "In a bistable system (e.g. a double-well potential), a subthreshold periodic signal alone cannot drive transitions between wells. Adding noise of optimal amplitude causes the system to cross the barrier preferentially in phase with the signal, producing a maximum in the output signal-to-noise ratio ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-sensory-noise-sr-optimality"
      ],
      "communication_gap": "Stochastic resonance was identified in physics (Benzi et al. 1981, Nicolis 1982) in the context of glacial climate oscillations, then moved to electronics and bistable ring lasers before reaching sensory biology. However, the physics and neuroscience literatures remain largely separate: neuroscientists cite the SR papers but rarely engage with the quantitative predictions from nonlinear dynamics (Kramers rate, SNR functional form). Therapeutic applications (noise-based prosthetics) are largely in rehabilitation engineering, disconnected from the theoretical physics foundation.\n",
      "translation_table": [
        {
          "field_a_term": "bistable double-well potential",
          "field_b_term": "threshold-based sensory neuron with two states (subthreshold / firing)"
        },
        {
          "field_a_term": "optimal noise amplitude D_opt (maximizes SNR)",
          "field_b_term": "optimal physiological noise level (thermal + channel noise) in hair cells or mechanoreceptors"
        },
        {
          "field_a_term": "periodic subthreshold forcing signal",
          "field_b_term": "weak periodic stimulus (prey water wave, faint touch, weak sound)"
        },
        {
          "field_a_term": "Kramers rate (thermal escape rate over barrier)",
          "field_b_term": "spontaneous firing rate in sensory neuron below threshold"
        },
        {
          "field_a_term": "output SNR peak at D_opt",
          "field_b_term": "behavioural detection threshold minimum at optimal noise; psychometric function nonmonotonicity"
        },
        {
          "field_a_term": "residence time distribution (bimodal at SR)",
          "field_b_term": "interspike interval distribution locked to stimulus period"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF02451825",
          "note": "Benzi, Sutera, Vulpiani (1981) - original stochastic resonance paper (climate oscillations)"
        },
        {
          "doi": "10.1038/380165a0",
          "note": "Levin & Miller (1996) - stochastic resonance in cricket cercal sensory neurons, Nature 380:165"
        },
        {
          "doi": "10.1126/science.271.5246.239",
          "note": "Collins et al. (1996) - noise-enhanced human tactile sensitivity"
        },
        {
          "doi": "10.1038/365337a0",
          "note": "Douglass, Wilkens, Pantazelou & Moss (1993) - noise-enhanced sensory information in crayfish mechanoreceptors, Nature 365:337"
        },
        {
          "doi": "10.1038/373033a0",
          "note": "Wiesenfeld & Moss (1995) - stochastic resonance and the benefits of noise in nonlinear systems, Nature 373:33"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/physics-neuroscience/b-stochastic-resonance.yaml"
    },
    {
      "id": "b-synchronization-circadian",
      "title": "Kuramoto phase locking ↔ circadian entrainment: jet lag as desynchronization crisis",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Kuramoto (1975) showed that a population of N weakly-coupled oscillators with heterogeneous natural frequencies omega_i synchronizes above a critical coupling strength K_c = 2/pi*g(0) (where g is the frequency distribution width), producing a collective phase-locked state described by the order para",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-circadian-synchrony-kuramoto-critical-coupling"
      ],
      "communication_gap": "Kuramoto published in the physics literature (Progress of Theoretical Physics, 1975). Circadian biology is dominated by genetics and cell biology journals (Cell, Nature, PNAS neuroscience section). The mathematical mapping was made explicit by Gonze et al. (2005, Biophysical Journal) but remains poorly known among chronobiologists who treat SCN coupling as a biological curiosity rather than a phase-transition phenomenon with universal scaling laws.\n",
      "translation_table": [
        {
          "field_a_term": "oscillator natural frequency omega_i",
          "field_b_term": "individual SCN neuron circadian period (~23.5-24.5 h)",
          "note": "Gaussian spread of ~1 h width; sets K_c"
        },
        {
          "field_a_term": "coupling constant K",
          "field_b_term": "VIP neuropeptide coupling strength",
          "note": "K > K_c required for coherent circadian output"
        },
        {
          "field_a_term": "Kuramoto order parameter r",
          "field_b_term": "amplitude of circadian behavioral rhythms",
          "note": "r→0 during jet lag / VIP knockout; r→1 in healthy entrainment"
        },
        {
          "field_a_term": "external forcing (Zeitgeber)",
          "field_b_term": "light-dark cycle via retinohypothalamic tract",
          "note": "Phase shifts the Kuramoto system; PRC determines re-entrainment rate"
        },
        {
          "field_a_term": "synchronization transition at K_c",
          "field_b_term": "onset of coherent circadian output in developing SCN",
          "note": "SCN coupling strengthens postnatally until K > K_c"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BFb0013365",
          "note": "Kuramoto (1975) - original self-entrainment paper, Springer Lecture Notes"
        },
        {
          "doi": "10.1529/biophysj.104.058388",
          "note": "Gonze et al. (2005) Biophysical Journal - Kuramoto model of SCN synchrony"
        },
        {
          "doi": "10.1016/j.neuron.2010.09.023",
          "note": "Welsh et al. (2010) Neuron - VIP coupling and SCN desynchrony review"
        },
        {
          "doi": "10.1038/s41593-019-0419-0",
          "note": "Aton & Herzog (2005) - mechanisms of SCN circadian rhythm coupling"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/physics-neuroscience/b-synchronization-circadian.yaml"
    },
    {
      "id": "b-percolation-oncology",
      "title": "Tumor vascular network fragmentation under adaptive therapy maps directly onto percolation-threshold transitions studied in statistical physics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "When a tumor's blood-supply network is disrupted below its percolation threshold, large-scale connectivity collapses and nutrient delivery fails — the same phase transition that physicists use to model connectivity in random graphs and porous media. Adaptive therapy (cycling drug doses to exploit co",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-adaptive-therapy-percolation-threshold"
      ],
      "communication_gap": "Oncology journals rarely cite statistical-physics literature, and vice versa. Clinical trials measure tumour volume or survival endpoints, not topological graph metrics. The toolchains (MATLAB/Python network libraries vs. medical imaging suites) are largely siloed, and grant bodies do not routinely fund physics-oncology hybrids.\n",
      "translation_table": [
        {
          "field_a_term": "bond percolation probability",
          "field_b_term": "vessel patency under treatment pressure",
          "note": "Fraction of edges (vessel segments) still functional"
        },
        {
          "field_a_term": "giant connected component",
          "field_b_term": "viable tumour vascular core",
          "note": "The dominant connected region supplying nutrients"
        },
        {
          "field_a_term": "percolation threshold p_c",
          "field_b_term": "minimum viable vascular density",
          "note": "Critical fraction below which nutrient supply collapses"
        },
        {
          "field_a_term": "cluster size distribution",
          "field_b_term": "isolated tumour micro-regions",
          "note": "Disconnected fragments starved of blood supply"
        },
        {
          "field_a_term": "renormalisation group",
          "field_b_term": "adaptive dosing schedule optimisation",
          "note": "Multi-scale description of how local drug actions aggregate to global effects"
        }
      ],
      "references": [
        {
          "arxiv": "2405.04242",
          "note": "Harvested paper that seeded u-tumor-containment-percolation"
        },
        {
          "doi": "10.1038/s41559-017-0328-6",
          "note": "Gatenby et al. — foundational adaptive-therapy clinical work"
        },
        {
          "doi": "10.1103/PhysRevLett.85.4626",
          "note": "Callaway et al. — network robustness and percolation thresholds"
        }
      ],
      "last_reviewed": "2026-05-04",
      "file": "cross-domain/physics-oncology/b-percolation-oncology.yaml"
    },
    {
      "id": "b-higgs-superconductivity",
      "title": "Higgs mechanism (particle physics) = Anderson-Higgs mechanism (superconductivity): same spontaneous symmetry breaking",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Higgs mechanism — by which the W and Z bosons acquire mass in the Standard Model — is mathematically identical to the Meissner effect in superconductors, discovered by Anderson (1958) and formalized by the BCS theory (1957). In both systems, a U(1) gauge symmetry is spontaneously broken by a com",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Anderson published the superconductor mechanism in Physical Review (1958) in the condensed-matter community. Higgs published in Physics Letters (1964) in the particle physics community. The mathematical identity was understood by a small group of theorists by the late 1960s but remains non-standard in both undergraduate condensed-matter and particle physics curricula. Most physics students learn BCS and the Standard Model in separate courses with no mention of the cross-domain equivalence.\n",
      "translation_table": [
        {
          "field_a_term": "Higgs field phi with <phi> != 0",
          "field_b_term": "Cooper pair condensate psi = |psi|*e^{i*theta}",
          "note": "Both are complex scalar order parameters with spontaneous U(1) breaking"
        },
        {
          "field_a_term": "W, Z bosons (massive)",
          "field_b_term": "photon inside superconductor (screened / massive)",
          "note": "Meissner effect = photon mass inside SC = expulsion of magnetic field"
        },
        {
          "field_a_term": "Higgs boson (125 GeV)",
          "field_b_term": "Higgs mode (amplitude fluctuation of condensate, ~meV)",
          "note": "Same excitation — amplitude mode of the order parameter"
        },
        {
          "field_a_term": "Mexican hat (wine bottle) potential V(phi)",
          "field_b_term": "Ginzburg-Landau free energy F(psi)",
          "note": "Identical quartic polynomial; minimum at |phi| = v/sqrt(2)"
        },
        {
          "field_a_term": "Goldstone boson (eaten by W/Z)",
          "field_b_term": "Phase fluctuation (eaten by photon, becomes Meissner screening)",
          "note": "Goldstone theorem: broken continuous symmetry -> massless boson -> absorbed"
        },
        {
          "field_a_term": "symmetry breaking scale v = 246 GeV",
          "field_b_term": "superconducting gap Delta ~ meV",
          "note": "13 orders of magnitude apart; same mathematical structure"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRev.110.827",
          "note": "Anderson (1958) Phys Rev — superconductivity and gauge symmetry"
        },
        {
          "doi": "10.1016/0031-9163(64)91136-9",
          "note": "Higgs (1964) Phys Lett — broken symmetries and masses of gauge bosons"
        },
        {
          "doi": "10.1103/PhysRevLett.108.047003",
          "note": "Matsunaga et al. (2012) PRL — Higgs amplitude mode in NbN superconductor"
        },
        {
          "doi": "10.1103/PhysRev.106.162",
          "note": "Bardeen, Cooper & Schrieffer (1957) — BCS theory"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/physics-physics/b-higgs-superconductivity.yaml"
    },
    {
      "id": "b-landau-theory-universality",
      "title": "Landau order parameter theory ↔ all second-order phase transitions: one framework governs superconductors, magnets, liquid crystals, and neural criticality",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Landau (1937) proposed that all continuous (second-order) phase transitions can be described by an order parameter phi that vanishes in the disordered phase and is non-zero in the ordered phase, with free energy expandable in even powers of phi: F = a*phi^2 + b*phi^4 + c*(∇phi)^2. The coefficient a ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-criticality-conscious-integration",
        "h-landau-neural-transition-measurability"
      ],
      "communication_gap": "Landau published in Russian (Zh. Eksp. Teor. Fiz., 1937). Wilson's RG was published in Reviews of Modern Physics (1983) for physicists. Neuroscientists studying criticality (Beggs, Plenz, Chialvo) rarely cite Landau or Wilson directly, instead reinventing the universality concept from scratch. Materials scientists know Landau theory deeply but rarely consider its application to biological or social systems. The conceptual unification is powerful but pedagogically fragmented across disciplines.\n",
      "translation_table": [
        {
          "field_a_term": "order parameter phi",
          "field_b_term": "magnetization / Cooper pair density / nematic tensor / firing rate",
          "note": "Different physical quantity, same mathematical role in each system"
        },
        {
          "field_a_term": "coefficient a = a_0*(T-T_c)",
          "field_b_term": "temperature distance from critical point / coupling from threshold",
          "note": "Sign change at T_c drives transition; maps to distance from neural criticality"
        },
        {
          "field_a_term": "universality class (symmetry + dimension)",
          "field_b_term": "Ising class (d=3): magnets, liquid-gas, neural mean-field",
          "note": "Ising, XY, Heisenberg, O(n) classes cover most physical transitions"
        },
        {
          "field_a_term": "RG flow to fixed point",
          "field_b_term": "long-wavelength physics determined by fixed point, not microscopy",
          "note": "Why biological neural criticality resembles magnetic criticality despite different physics"
        },
        {
          "field_a_term": "Ginzburg criterion",
          "field_b_term": "when mean-field theory breaks down (fluctuation-dominated regime)",
          "note": "Determines whether critical exponents are classical or anomalous"
        }
      ],
      "references": [
        {
          "doi": "10.1016/B978-0-08-010586-4.50034-1",
          "note": "Landau (1937) - original order parameter theory of phase transitions"
        },
        {
          "doi": "10.1103/RevModPhys.55.583",
          "note": "Wilson & Kogut (1974/1983) Rev. Mod. Phys. - RG and universality"
        },
        {
          "doi": "10.1103/PhysRevLett.87.198701",
          "note": "Chialvo (2010) Nat. Phys. - neural criticality and Landau-like order"
        },
        {
          "doi": "10.1017/CBO9780511622250",
          "note": "Goldenfeld (1992) - Lectures on Phase Transitions and RG (textbook)"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/physics-physics/b-landau-theory-universality.yaml"
    },
    {
      "id": "b-ising-social-dynamics",
      "title": "The Ising model of ferromagnetism describes opinion dynamics, social norm adoption, and political polarisation — social tipping points (climate action spreading, norm cascades, market crashes) are formal phase transitions in the Ising universality class, with measurable early-warning indicators derivable from statistical physics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The Ising model (1920) places binary spins (+1/-1) on a lattice with ferromagnetic coupling J: spins prefer to align with neighbours. Below the Curie temperature T_c, the system spontaneously magnetises (all spins align — ferromagnetic phase). Above T_c, spins are disordered (paramagnetic phase). At",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-norm-cascade-ising-ew"
      ],
      "communication_gap": "The Ising-voter model analogy has existed in statistical physics since Schelling (1971) and has been formalised by Galam (1990s), Sznajd (2000), and Castellano et al. (2009 Reviews of Modern Physics). Despite this, political science, sociology, and behavioural economics journals virtually never use universality classes, critical exponents, or bifurcation theory. The communication gap is mutual: physicists who work on social models rarely engage with empirical political science datasets or collaborate with survey researchers who hold the relevant data.\n",
      "translation_table": [
        {
          "field_a_term": "Ising spin (+1/-1)",
          "field_b_term": "binary opinion or behaviour (adopts norm / does not adopt norm)"
        },
        {
          "field_a_term": "ferromagnetic coupling J (spin-spin interaction strength)",
          "field_b_term": "social influence strength (peer pressure, network centrality)"
        },
        {
          "field_a_term": "Curie temperature T_c (phase transition point)",
          "field_b_term": "social tipping point (critical level of social temperature / contrarianism)"
        },
        {
          "field_a_term": "spontaneous magnetisation (order parameter M)",
          "field_b_term": "consensus / polarisation (fraction of population holding majority opinion)"
        },
        {
          "field_a_term": "external magnetic field H",
          "field_b_term": "media influence, advertising, top-down policy, or individual authority figures"
        },
        {
          "field_a_term": "critical fluctuations (diverging susceptibility chi ~ |T-T_c|^(-gamma))",
          "field_b_term": "increasing opinion volatility near social tipping point"
        },
        {
          "field_a_term": "Ising domain wall",
          "field_b_term": "geographic or social network boundary between opinion communities (echo chamber boundary)"
        },
        {
          "field_a_term": "Ising universality class (nu=1, beta=1/8, gamma=7/4 in 2D)",
          "field_b_term": "universal opinion dynamics exponents — predicted but never measured in empirical attitude data"
        }
      ],
      "references": [
        {
          "doi": "10.1103/RevModPhys.81.591",
          "note": "Castellano, Fortunato, Loreto (2009) — comprehensive review of statistical physics of social dynamics; Ising universality in opinion models"
        },
        {
          "doi": "10.1140/epjb/e2008-00transition",
          "note": "Galam (2008) — sociophysics; majority-rule model as Ising transition"
        },
        {
          "doi": "10.1016/S0378-4371(00)00155-3",
          "note": "Sznajd-Weron & Sznajd (2000) — opinion dynamics on lattice; Ising universality class"
        },
        {
          "doi": "10.1038/s41562-018-0306-y",
          "note": "Otto et al. (2020) — social tipping points for climate action; qualitative; no bifurcation class assigned"
        },
        {
          "doi": "10.1073/pnas.0305937101",
          "note": "Sornette & Zhou (2006) — LPPL market crash model as Ising with long-range interactions"
        }
      ],
      "last_reviewed": "2026-05-04",
      "file": "cross-domain/physics-social/b-ising-social-dynamics.yaml"
    },
    {
      "id": "b-statistical-physics-x-social-science",
      "title": "Statistical Physics x Social Science — opinion dynamics as spin systems\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Collective human opinion formation, consensus emergence, and polarization obey the same universality class as ferromagnetic spin systems near critical temperature; the Ising model with social interaction terms quantitatively reproduces empirically observed opinion distributions in large populations.",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Social scientists distrust physics-inspired models as reductive; physicists rarely engage with qualitative social theory. Cross-disciplinary publication is structurally difficult due to journal siloing.\n",
      "translation_table": [
        {
          "field_a_term": "Spin up / spin down (+/-1)",
          "field_b_term": "Opinion A / opinion B",
          "note": "Binary opinion states map directly to spin states; the Hamiltonian coupling J corresponds to social influence strength.\n"
        },
        {
          "field_a_term": "Temperature T (disorder parameter)",
          "field_b_term": "Social noise / individuality",
          "note": "High T = high individuality (disordered phase); low T = conformity (ordered phase). Critical T corresponds to the onset of polarization.\n"
        },
        {
          "field_a_term": "Phase transition (ferromagnetic ordering)",
          "field_b_term": "Consensus formation / cascade",
          "note": "Below critical T, spontaneous symmetry breaking drives population to consensus — equivalent to opinion cascade in social networks.\n"
        },
        {
          "field_a_term": "External magnetic field H",
          "field_b_term": "Media influence / agenda setting",
          "note": "Bias field H tilts the equilibrium opinion distribution — quantifies media power in a measurable way.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1103/RevModPhys.81.591",
          "note": "Castellano, Fortunato & Loreto (2009) — statistical physics of social dynamics; comprehensive review"
        },
        {
          "doi": "10.1016/j.physrep.2004.11.009",
          "note": "Sznajd-Weron (2005) — opinion dynamics and Ising-type models"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-social/b-statistical-physics-x-social-science.yaml"
    },
    {
      "id": "b-crowd-dynamics-social-force-model",
      "title": "Pedestrian crowd dynamics follow Helbing's social force model — each individual is driven by desired velocity, interpersonal repulsion, and wall avoidance forces — producing emergent phenomena including lane formation and crowd turbulence that match the mathematical structure of active-matter molecular dynamics near a jamming transition",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Helbing's social force model (1995) gives m_i * d^2r_i/dt^2 = F_i^drive + sum_j F_{ij}^repulse + F_i^wall, where F_{ij}^repulse = (A*exp((r_i+r_j-d_{ij})/B) + k*g(r_i+r_j-d_{ij})) * n_{ij} + kappa*g(r_i+r_j-d_{ij})*delta_v_i^t * t_{ij}; these are identical in form to a Lennard-Jones-like pair potent",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-crowd-dynamics-lane-formation-critical-density"
      ],
      "communication_gap": "Urban planners and safety engineers use empirical crowd density guidelines (Level of Service A–F) while physicists model crowd dynamics as active matter; the Helbing social force model bridged these worlds but operational evacuation software rarely uses physics-based parameter calibration from field data.",
      "translation_table": [
        {
          "field_a_term": "desired velocity drive F_i^drive in social force model (social science)",
          "field_b_term": "external driving force on a particle in driven diffusive system (physics)",
          "note": "Each pedestrian acts as a driven particle with preferred speed; drive minus friction produces the terminal velocity"
        },
        {
          "field_a_term": "interpersonal repulsion between pedestrians (social science)",
          "field_b_term": "short-range repulsive pair potential in molecular dynamics (physics)",
          "note": "Psychophysical proximity discomfort maps to an exponential potential; contact force maps to hard-sphere constraint"
        },
        {
          "field_a_term": "lane formation in bidirectional flow (social science)",
          "field_b_term": "spontaneous symmetry breaking / stripe phase in active matter (physics)",
          "note": "Lane formation is a phase transition from disordered to stripe-ordered flow; analyzed with order parameters from condensed matter"
        },
        {
          "field_a_term": "crowd turbulence at high density (social science)",
          "field_b_term": "velocity correlation length divergence near critical density (physics)",
          "note": "Crowd turbulence shows correlated velocity fluctuations identical to active matter near a glass/jamming transition"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevE.51.4282",
          "note": "Helbing & Molnar (1995) - social force model for pedestrian dynamics (foundational paper)"
        },
        {
          "doi": "10.1038/438439a",
          "note": "Helbing et al. (2007) - dynamics of crowd disasters: an empirical study (crowd turbulence)"
        },
        {
          "doi": "10.1073/pnas.1001894107",
          "note": "Moussaid et al. (2011) - how simple rules determine pedestrian behavior and crowd disasters"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-social-science/b-crowd-dynamics-social-force-model.yaml"
    },
    {
      "id": "b-network-epidemiology-herd-immunity",
      "title": "Network Epidemiology and Herd Immunity — SIR dynamics on heterogeneous contact networks, scale-free epidemic thresholds, and superspreader percolation",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The SIR (Susceptible–Infected–Recovered) model on networks assigns each node a state and allows transmission along edges at rate β with recovery at rate γ. In homogeneous networks the basic reproduction number R₀ = β⟨k⟩/γ determines whether epidemics spread. On heterogeneous networks with degree dis",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Mathematical epidemiology (compartmental SIR models) developed in the public health tradition largely independently of the network physics community that developed scale-free network theory in the late 1990s. The Pastor-Satorras & Vespignani (2001) paper bridged these communities but is still not standard reading in public health curricula. During COVID-19, failure to incorporate network heterogeneity and superspreading into early models led to systematic underestimation of outbreak clustering and overestimation of required vaccine coverage.\n",
      "translation_table": [
        {
          "field_a_term": "R₀ = β⟨k²⟩/(γ⟨k⟩) on heterogeneous network",
          "field_b_term": "expected secondary cases adjusted for contact network heterogeneity",
          "note": "⟨k²⟩/⟨k⟩ is the excess degree — the average connectivity of a random neighbour, which exceeds ⟨k⟩"
        },
        {
          "field_a_term": "vanishing epidemic threshold on scale-free networks",
          "field_b_term": "impossibility of achieving epidemic control by reducing transmissibility alone",
          "note": "For γ ≤ 3 in p(k) ∝ k^{-γ}, any β > 0 leads to endemic infection absent immunisation"
        },
        {
          "field_a_term": "percolation threshold (giant component dissolution)",
          "field_b_term": "herd immunity threshold — fraction immune to fragment transmission network",
          "note": "Epidemic ends when infected cluster can no longer percolate through the susceptible network"
        },
        {
          "field_a_term": "acquaintance vaccination (immunise random neighbour of random node)",
          "field_b_term": "friendship paradox-based strategy to reach hubs without full degree knowledge",
          "note": "Random neighbours have higher expected degree than random nodes — statistically targets hubs efficiently"
        },
        {
          "field_a_term": "overdispersed offspring distribution (negative binomial k)",
          "field_b_term": "superspreader phenomenon in disease transmission",
          "note": "Lloyd-Smith et al. 2005: k < 1 implies most cases cause zero transmissions but rare events cause many"
        },
        {
          "field_a_term": "SIR phase transition at R₀ = 1",
          "field_b_term": "epidemic vs. fade-out in a population",
          "note": "Second-order phase transition in network epidemiology; analogous to ferromagnetic–paramagnetic transition"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.86.3200",
          "note": "Pastor-Satorras & Vespignani (2001) Phys Rev Lett 86:3200 — epidemic threshold on scale-free networks"
        },
        {
          "note": "Kermack & McKendrick (1927) Proc R Soc A 115:700 — original SIR model"
        },
        {
          "note": "Anderson & May (1991) Infectious Diseases of Humans — classical epidemiology reference"
        },
        {
          "doi": "10.1038/nature04153",
          "note": "Lloyd-Smith et al. (2005) Nature 438:355 — superspreading and overdispersion"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-social-science/b-network-epidemiology-herd-immunity.yaml"
    },
    {
      "id": "b-order-book-market-microstructure",
      "title": "The limit order book is a non-equilibrium stochastic system governed by Poisson order flows — Kyle's lambda (price impact linear in signed flow), the Glosten-Milgrom adverse selection spread, and the square-root market impact law connect queueing theory and statistical physics to market microstructure.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The limit order book (LOB) is a queue of standing buy (bid) and sell (ask) orders at discrete price levels. Market dynamics are driven by three Poisson processes: limit order arrivals (rate λ_b, λ_a at each price level), market order arrivals (rate μ), and cancellations (rate δ). Cont-Stoikov-Talrej",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-order-book-square-root-impact-universal-liquidity"
      ],
      "communication_gap": "Financial economics and market microstructure are published in finance journals (Journal of Finance, Review of Financial Studies) that physicists rarely read. The statistical physics of disordered systems and queuing theory is published in physics journals. Econophysics bridges the gap but is treated as peripheral by both finance academics and physics departments. Regulators who design market structure rules rarely engage with the physics or queueing theory literature.\n",
      "translation_table": [
        {
          "field_a_term": "Poisson queue (arrival rate λ, service rate μ)",
          "field_b_term": "limit order book (order arrival, cancellation, execution rates)",
          "note": "LOB is a multi-server queue with state-dependent rates; analytical solutions exist for the exponential model"
        },
        {
          "field_a_term": "illiquidity measure λ (Kyle's lambda)",
          "field_b_term": "price impact per unit signed order flow (bid-ask adverse selection)",
          "note": "Kyle's lambda is the econophysics analogue of the friction coefficient; higher λ = less liquid market"
        },
        {
          "field_a_term": "square root price impact ΔP ∝ √Q",
          "field_b_term": "empirical market impact law for institutional trades",
          "note": "Derived from LOB model with replenishment; consistent with meta-order execution data across global equities"
        },
        {
          "field_a_term": "information asymmetry (adverse selection)",
          "field_b_term": "Glosten-Milgrom bid-ask spread = 2·λ·P(informed trader)·E[|v-P||trade]",
          "note": "Spread is zero in a world with only noise traders; it reflects the cost of providing liquidity to informed agents"
        },
        {
          "field_a_term": "correlated failure / phase transition",
          "field_b_term": "flash crash (LOB drains when HFT market makers withdraw simultaneously)",
          "note": "The 2010 Flash Crash saw the Dow Jones fall 9% in minutes as correlated HFT cancellations drained liquidity"
        }
      ],
      "references": [
        {
          "doi": "10.2307/1913210",
          "note": "Kyle (1985) Econometrica 53:1315 — continuous auctions and insider trading; Kyle's lambda illiquidity measure"
        },
        {
          "doi": "10.1016/0304-405X(85)90044-3",
          "note": "Glosten & Milgrom (1985) J Financ Econ 14:71 — bid, ask, and transaction prices in a market with asymmetric information"
        },
        {
          "doi": "10.1214/09-AAP682",
          "note": "Cont et al. (2010) Ann Appl Probab 20:1462 — stochastic model of order book dynamics"
        },
        {
          "note": "Bouchaud et al. (2018) Trades, Quotes and Prices: Financial Markets Under the Microscope. Cambridge University Press."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-social-science/b-order-book-market-microstructure.yaml"
    },
    {
      "id": "b-percolation-rumor-spreading",
      "title": "Rumour and misinformation spreading on social networks maps exactly onto bond percolation on the contact network via the SIR epidemic model — with the percolation threshold p_c → 0 for scale-free networks, meaning any viral meme can reach the giant component of social attention regardless of initial conditions.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "SIR RUMOUR MODEL (Daley & Kendall 1965): Individuals are Susceptible (haven't heard), Infected (spreading), Recovered (heard but no longer spreading). Rate equations:\n\n  dS/dt = -βSI\n  dI/dt = βSI - γI\n  dR/dt = γI\n\nFinal outbreak size R(∞)/N = 1 - e^(-R₀·R(∞)/N) where R₀ = β/γ.\nBOND PERCOLATION MAP",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-misinformation-emotional-valence-persistence"
      ],
      "communication_gap": "Social media researchers and social psychologists who study misinformation rarely use percolation theory. Network scientists publishing in physics journals derive epidemic thresholds without connecting to the empirical social science literature on misinformation content effects.\n",
      "translation_table": [
        {
          "field_a_term": "bond percolation threshold p_c (physics)",
          "field_b_term": "epidemic threshold R_0 = 1 (epidemiology) / virality threshold (social science)",
          "note": "All three are the same phase transition in disguise; p_c = 1/⟨k⟩ = 1/R_0 on regular random graph"
        },
        {
          "field_a_term": "giant connected component (percolation)",
          "field_b_term": "global information cascade (social media)",
          "note": "Information reaches global scale iff spreading event is above percolation threshold"
        },
        {
          "field_a_term": "transmission probability T (percolation)",
          "field_b_term": "virality rate / share probability (social science)",
          "note": "Emotional content of news increases T; this is the mechanism for misinformation advantage"
        },
        {
          "field_a_term": "network degree distribution P(k) (network science)",
          "field_b_term": "follower distribution on social platforms",
          "note": "Scale-free follower distributions imply p_c -> 0; any content can go viral given right initial seed"
        }
      ],
      "references": [
        {
          "doi": "10.1093/imamat/1.1.42",
          "note": "Daley & Kendall (1965) IMA J Appl Math 1:42 — original rumour spreading model"
        },
        {
          "doi": "10.1126/science.aap9559",
          "note": "Vosoughi et al. (2018) Science 359:1146 — false news spreads faster than true"
        },
        {
          "doi": "10.1103/PhysRevE.69.066130",
          "note": "Moreno et al. (2004) Phys Rev E 69:066130 — percolation mapping of rumour spreading"
        },
        {
          "note": "Goffman & Newill (1964) Nature 204:225 — information spread as epidemic"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-social-science/b-percolation-rumor-spreading.yaml"
    },
    {
      "id": "b-schelling-phase-separation",
      "title": "Schelling's segregation model maps onto binary-alloy phase separation — social tolerance thresholds are thermodynamic critical points",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Schelling's (1971) segregation model — agents move when the fraction of unlike neighbors exceeds a threshold — produces complete phase separation even for low tolerance thresholds (~30%). This maps exactly onto binary alloy phase separation: below T_c, A-B mixtures separate into A-rich and B-rich do",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-schelling-spinodal-coarsening"
      ],
      "communication_gap": "Sociologists and urban economists rarely read condensed matter physics; the Cahn-Hilliard formalism is not taught in social science programs. Vinkovic & Kirman (2006) made the connection but it remains largely uncited in mainstream urban economics.\n",
      "translation_table": [
        {
          "field_a_term": "tolerance threshold (fraction of unlike neighbors)",
          "field_b_term": "critical temperature T_c in binary alloy",
          "note": "Below threshold / below T_c, phase separation is thermodynamically favored"
        },
        {
          "field_a_term": "urban neighborhood composition (fraction group A)",
          "field_b_term": "local concentration field phi(x) in Cahn-Hilliard equation",
          "note": "Both fields follow conserved dynamics — total population conserved"
        },
        {
          "field_a_term": "domain coarsening (segregated neighborhoods growing)",
          "field_b_term": "spinodal decomposition and Ostwald ripening",
          "note": "Domain growth follows same power-law kinetics R(t) ~ t^(1/3)"
        },
        {
          "field_a_term": "segregation index (fraction of neighbors same-group)",
          "field_b_term": "order parameter phi_A - phi_B",
          "note": "Both measure departure from mixed phase"
        }
      ],
      "references": [
        {
          "note": "Schelling, T.C. (1971). Dynamic models of segregation. J Math Sociol 1:143-186."
        },
        {
          "doi": "10.1063/1.1744102",
          "note": "Cahn & Hilliard (1958). Free energy of a nonuniform system. J Chem Phys 28:258."
        },
        {
          "doi": "10.1073/pnas.0609371103",
          "note": "Vinkovic & Kirman (2006). A physical analogue of the Schelling model. PNAS 103:19261."
        },
        {
          "doi": "10.1140/epjb/e2009-00234-0",
          "note": "Gauvin et al. (2009). Schelling segregation in an open city. Eur Phys J B 70:293."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-social-science/b-schelling-phase-separation.yaml"
    },
    {
      "id": "b-statistical-mechanics-opinion",
      "title": "The Ising model of opinion dynamics maps social consensus formation onto ferromagnetic phase transitions (T < T_c → ordered consensus; T > T_c → disordered pluralism), while bounded-confidence models predict opinion clustering and polarization — bridging statistical mechanics with quantitative social science.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The voter model and Ising model provide a rigorous statistical mechanics framework for opinion dynamics. In the Ising opinion model, agents (spins) hold binary opinion σ_i = ±1 (yes/no, left/right, agree/disagree), aligned by social influence J (ferromagnetic coupling — conformity pressure) and dist",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-bounded-confidence-epsilon-polarization-social-media-filter-bubbles"
      ],
      "communication_gap": "Statistical physics models of social dynamics (sociophysics) are published in journals like Physica A, Physical Review E, and Int J Mod Phys C — largely invisible to social scientists. Sociologists and political scientists are skeptical of physics analogies for human behavior (agency, deliberation, institutional context are absent from Ising-type models). The opinion dynamics literature (Castellano et al. 2009 Rev Mod Phys review) is extensive but rarely cited in political science. Bounded confidence models were developed by engineers (Deffuant, Weisbuch) and are known in complex systems but not in mainstream social science. Structural balance theory, though originating in social psychology (Heider 1946), has been formalized using graph theory that is not standard in sociology.\n",
      "translation_table": [
        {
          "field_a_term": "spin σ_i = ±1 (up/down)",
          "field_b_term": "binary opinion of agent i (yes/no, liberal/conservative)"
        },
        {
          "field_a_term": "ferromagnetic coupling J (tendency to align)",
          "field_b_term": "social influence / conformity pressure between neighbors"
        },
        {
          "field_a_term": "social noise temperature T",
          "field_b_term": "rate of independent opinion change / contrarianism"
        },
        {
          "field_a_term": "phase transition at T_c (order-disorder)",
          "field_b_term": "tipping point between consensus and pluralism"
        },
        {
          "field_a_term": "spontaneous magnetization ⟨σ⟩ ≠ 0 (ordered phase)",
          "field_b_term": "majority consensus emerging from a homogeneous initial distribution"
        },
        {
          "field_a_term": "confidence bound ε in Deffuant-Weisbuch model",
          "field_b_term": "psychological latitude of acceptance (Sheriff's social judgment theory)"
        },
        {
          "field_a_term": "structural balance (all triangles positive)",
          "field_b_term": "social network ground state (two opposing camps)"
        }
      ],
      "references": [
        {
          "doi": "10.1142/S0129183108012954",
          "note": "Galam (2008) Int J Mod Phys C 19:409 — sociophysics: a review of Galam models"
        },
        {
          "doi": "10.1142/S0129183100000936",
          "note": "Sznajd-Weron & Sznajd (2000) Int J Mod Phys C 11:1157 — opinion evolution in closed community (Sznajd model)"
        },
        {
          "note": "Deffuant et al. (2000) Adv Complex Syst 3:87 — mixing beliefs among interacting agents (bounded confidence)"
        },
        {
          "doi": "10.1103/RevModPhys.81.591",
          "note": "Castellano et al. (2009) Rev Mod Phys 81:591 — statistical physics of social dynamics (comprehensive review)"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-social-science/b-statistical-mechanics-opinion.yaml"
    },
    {
      "id": "b-urban-scaling-statistical-physics",
      "title": "Urban scaling laws — city GDP, patents, and crime scaling superlinearly (β ≈ 1.15) while infrastructure scales sublinearly (β ≈ 0.85) with population — emerge from statistical physics of social interaction networks with fractal road geometry, analogous to critical phenomena with universal exponents independent of city-specific cultural or geographic details.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Bettencourt et al. (2007) showed that urban properties Y scale as power laws Y ∝ N^β with population N for cities across countries and continents. Superlinear scaling (β ≈ 1.15): GDP, patents, R&D employment, wages, crime, disease transmission, new infections — all increase faster than linearly with",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-urban-superlinear-scaling-social-interaction-fractal-road-network"
      ],
      "communication_gap": "Urban economics (Glaeser, Henderson) has studied agglomeration effects for decades in economics journals without connecting to the physics scaling framework. Statistical physicists (Bettencourt, West, Barthelemy) published urban scaling in PNAS and Nature Physics, largely inaccessible to urban planners, economists, and city managers. The fractal road network geometry connection requires both spatial statistics and physics — a combination absent from standard urban planning, economics, or physics curricula.\n",
      "translation_table": [
        {
          "field_a_term": "critical exponent β in power law Y ∝ N^β",
          "field_b_term": "urban scaling exponent (β>1 superlinear, β<1 sublinear)",
          "note": "β is universal (same for US, China, Brazil, EU cities) — like universality classes in phase transitions"
        },
        {
          "field_a_term": "fractal dimension d_f of road network",
          "field_b_term": "geometric constraint determining social interaction density per capita",
          "note": "d_f ≈ 1.8 for most cities — between line (1) and plane (2); reflects hierarchical street structure"
        },
        {
          "field_a_term": "renormalization group (RG) coarse-graining",
          "field_b_term": "increasing city size as RG step — microscopic individual → neighborhood → district",
          "note": "Universal exponents appear at each level of the city hierarchy, as in RG fixed points"
        },
        {
          "field_a_term": "fluctuation-dissipation theorem (variance ∝ susceptibility)",
          "field_b_term": "variance in city GDP ∝ mean GDP (shot noise from innovation events)",
          "note": "Lognormal distribution of city GDP fluctuations consistent with Gibrat's proportional growth rule"
        },
        {
          "field_a_term": "mean-field theory of interacting agents",
          "field_b_term": "urban economics of agglomeration effects (Marshall, 1890)",
          "note": "Agglomeration externalities = mean-field interaction term; exactly the statistical physics approximation"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.0610172104",
          "note": "Bettencourt et al. (2007) Growth, innovation, scaling, and the pace of life in cities. PNAS 104:7301"
        },
        {
          "note": "Batty (2013) The New Science of Cities. MIT Press"
        },
        {
          "doi": "10.1038/srep05561",
          "note": "Louf & Barthelemy (2014) Scaling: lost in the smog. Sci Rep 4:5561"
        },
        {
          "note": "West (2017) Scale: The Universal Laws of Growth, Innovation, Sustainability, and the Pace of Life. Penguin Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-social-science/b-urban-scaling-statistical-physics.yaml"
    },
    {
      "id": "b-voter-model-opinion-dynamics",
      "title": "The voter model (Clifford–Sudbury 1973) is exactly solvable on any graph and shows that consensus time, coexistence probability, and polarization dynamics depend on spatial dimension and network topology in ways that match empirical political polarization patterns.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The voter model: each agent holds one of two opinions (0 or 1); at each time step, a random agent copies a random neighbor. This is exactly solvable via duality with coalescing random walks. Key results: (1) In d ≤ 2 (including all finite networks), consensus (all-0 or all-1) occurs with probability",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-voter-model-echo-chamber-polarization-timescale"
      ],
      "communication_gap": "Clifford & Sudbury (1973) published in Biometrika as a model of territorial competition in ecology (not sociology). The social interpretation was developed by physicists (Sood, Redner, Vazquez, Eguíluz) in physics journals. Political scientists and sociologists studying polarization rarely cite the voter model literature. The exact solution via coalescing random walks is not taught in social science programs, despite providing quantitative predictions testable against longitudinal survey data.\n",
      "translation_table": [
        {
          "field_a_term": "voter model agent copying random neighbor",
          "field_b_term": "social influence — individuals adopt opinions from their network"
        },
        {
          "field_a_term": "coalescing random walk dual process",
          "field_b_term": "genealogy of opinion spread — tracks which agent's opinion dominates"
        },
        {
          "field_a_term": "consensus (absorbing state, all spins identical)",
          "field_b_term": "political convergence or ideological monoculture"
        },
        {
          "field_a_term": "coexistence (T → ∞, no consensus)",
          "field_b_term": "persistent political polarization in large societies"
        },
        {
          "field_a_term": "consensus time T_N ∝ N log N (d=2)",
          "field_b_term": "timescale for political opinion cycles in social networks"
        },
        {
          "field_a_term": "modular graph topology (community structure)",
          "field_b_term": "social echo chambers and filter bubbles"
        },
        {
          "field_a_term": "spontaneous opinion flip rate ε (noisy voter)",
          "field_b_term": "independent opinion formation rate outside social influence"
        },
        {
          "field_a_term": "spatial dimension d",
          "field_b_term": "effective social network dimension (expander vs lattice-like)"
        }
      ],
      "references": [
        {
          "doi": "10.2307/2334555",
          "note": "Clifford & Sudbury (1973) A model for spatial conflict. Biometrika 60:581"
        },
        {
          "doi": "10.1103/PhysRevLett.94.178701",
          "note": "Sood & Redner (2005) Voter model on heterogeneous graphs. Phys Rev Lett 94:178701"
        },
        {
          "doi": "10.1088/1751-8113/41/43/435003",
          "note": "Vazquez & Eguíluz (2008) Analytical solution of the voter model on uncorrelated networks. J Phys A 41:435003"
        },
        {
          "doi": "10.1007/s10955-007-9395-9",
          "note": "Mobilia et al. (2007) On the role of zealotry in the voter model. J Stat Phys 128:447"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-social-science/b-voter-model-opinion-dynamics.yaml"
    },
    {
      "id": "b-scaling-laws-cities",
      "title": "Urban scaling laws — cities as social organisms obeying superlinear and sublinear power-law scaling",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Bettencourt et al. (2007) showed that virtually all urban indicators Y scale as power laws Y ∝ N^β with population N, with two universal exponent classes: (1) socioeconomic outputs (patents, GDP, wages, crime, innovation) scale superlinearly with β ≈ 1.15 ± 0.01 across 40+ countries and decades; (2)",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-city-as-social-organism-with-metabolism"
      ],
      "communication_gap": "Urban economics and urban sociology treat city size effects primarily as agglomeration externalities without mathematical scaling theory. Physics-trained researchers (Bettencourt, West, Lobo) introduced the scaling framework from outside the field. Urban economists initially resisted the power-law claims, preferring log-linear regression. The biological analogy (cities as organisms) is viewed as metaphor by sociologists rather than a testable quantitative model.\n",
      "translation_table": [
        {
          "field_a_term": "population N",
          "field_b_term": "body mass M (in metabolic scaling)",
          "note": "The key extensive variable that sets the scale of all other quantities"
        },
        {
          "field_a_term": "socioeconomic output Y ∝ N^1.15",
          "field_b_term": "superlinear return to social density",
          "note": "Each doubling of population yields 15% more output per capita — increasing returns to scale"
        },
        {
          "field_a_term": "infrastructure I ∝ N^0.85",
          "field_b_term": "sublinear scaling (analogous to metabolic rate B ∝ M^0.75)",
          "note": "Economies of scale: larger cities need less infrastructure per capita"
        },
        {
          "field_a_term": "social interaction rate ∝ N^(1+1/6)",
          "field_b_term": "vascular blood flow rate ∝ M^(3/4) (metabolic scaling)",
          "note": "Both derived from network geometry constraints; the exponent δ=1/6 from urban spatial constraints"
        },
        {
          "field_a_term": "city growth rate — must accelerate to sustain superlinear scaling",
          "field_b_term": "biological senescence — metabolic rate increases with size",
          "note": "Cities must periodically 'reset' through major innovations to avoid collapse; organisms senesce"
        },
        {
          "field_a_term": "urban agglomeration (clustering coefficient of social network)",
          "field_b_term": "Reynolds number (turbulence intensity)",
          "note": "Both parameterise the qualitative change in system behaviour with scale"
        }
      ],
      "references": [
        {
          "doi": "10.1073/pnas.0610172104",
          "note": "Bettencourt et al. (2007) PNAS 104:7301 — universal scaling of urban indicators"
        },
        {
          "doi": "10.1126/science.1235823",
          "note": "Bettencourt (2013) Science 340:1438 — origin of scaling in human social networks"
        },
        {
          "note": "West (2017) Scale. Penguin Press. — popular exposition of scaling in cities and organisms",
          "url": "https://www.penguinrandomhouse.com/books/314049/scale-by-geoffrey-west/"
        },
        {
          "note": "Glaeser (2011) Triumph of the City. Macmillan. — urban economics perspective",
          "url": "https://www.macmillanpublishers.com/9780143120544/"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/physics-sociology/b-scaling-laws-cities.yaml"
    },
    {
      "id": "b-adiabatic-elimination-x-gene-circuit-model-reduction",
      "title": "Adiabatic elimination from multiscale physics provides a rigorous reduction template for stochastic gene-circuit models.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Adiabatic elimination from multiscale physics provides a rigorous reduction template for stochastic gene-circuit models.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-adiabatic-elimination-preserves-switching-time-statistics-in-gene-circuit-surrogates"
      ],
      "communication_gap": "The two communities use different notation, benchmarks, and publication venues, which obscures transferable mathematical structure.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1103/PhysRevA.31.1695",
          "note": "Adiabatic elimination in stochastic systems."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/physics-systems-biology/b-adiabatic-elimination-x-gene-circuit-model-reduction.yaml"
    },
    {
      "id": "b-hawking-radiation-unruh-effect",
      "title": "Hawking radiation from black holes and the Unruh effect experienced by uniformly accelerating observers are mathematically equivalent quantum field theory predictions: both arise from the thermal character of the Minkowski vacuum perceived by non-inertial observers, with temperature T_H = ℏc^3/(8πGMk_B) and T_U = ℏa/(2πck_B) related by the equivalence principle",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Hawking (1974) showed that a black hole emits thermal radiation at temperature T_H = ℏc^3/(8πGMk_B) because the Bogoliubov transformation relating in- and out-state mode expansions is thermal; Unruh (1976) showed that an observer accelerating at rate a in flat spacetime perceives the Minkowski vacuu",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Relativists study Hawking radiation in the context of quantum gravity while thermodynamicists study heat baths and statistical mechanics; the equivalence principle connection between T_H and T_U is textbook knowledge in quantum field theory in curved spacetime but poorly integrated with experimental quantum optics analog systems.",
      "translation_table": [
        {
          "field_a_term": "Hawking radiation temperature T_H = ℏc^3/(8πGMk_B) (physics)",
          "field_b_term": "Unruh temperature T_U = ℏa/(2πck_B) for acceleration a = c^4/(4GM) at horizon (thermodynamics)",
          "note": "At the event horizon, the gravitational acceleration equals a = c^4/(4GM); T_H = T_U by the equivalence principle"
        },
        {
          "field_a_term": "black hole event horizon (physics)",
          "field_b_term": "Rindler wedge horizon seen by accelerating observer (physics)",
          "note": "Both horizons block information access; Bogoliubov transformation between horizon-split modes produces thermal spectrum"
        },
        {
          "field_a_term": "black hole entropy S_BH = A/(4L_P^2) (physics)",
          "field_b_term": "entanglement entropy across the Rindler horizon (thermodynamics)",
          "note": "Bekenstein-Hawking entropy equals the entanglement entropy of field vacuum across the horizon"
        },
        {
          "field_a_term": "information paradox and Hawking radiation unitarity (physics)",
          "field_b_term": "thermalization of entangled quantum states across causal horizons (thermodynamics)",
          "note": "Both require resolving how unitary quantum evolution is consistent with thermal emission across horizons"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF02345020",
          "note": "Hawking (1975) - particle creation by black holes: original derivation"
        },
        {
          "doi": "10.1103/PhysRevD.14.870",
          "note": "Unruh (1976) - notes on black hole evaporation: Unruh effect derivation"
        },
        {
          "doi": "10.12942/lrr-2001-6",
          "note": "Jacobson (2003) - introduction to quantum fields in curved spacetime"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-thermodynamics/b-hawking-radiation-unruh-effect.yaml"
    },
    {
      "id": "b-laser-cooling-doppler-optical-molasses",
      "title": "Laser cooling exploits the Doppler effect to selectively absorb photons from the direction of atomic motion, reducing atomic kinetic energy below the Doppler limit kT_D = hbar*Gamma/2; this is entropy reduction by photon-mediated information gain, connecting atomic physics, thermodynamics, and the physics of Maxwell's demon.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "In optical molasses, three orthogonal pairs of counter-propagating laser beams are tuned slightly red-detuned from an atomic transition. An atom moving with velocity v preferentially absorbs photons from the beam it moves toward (Doppler blueshift brings that beam into resonance) and the absorbed ph",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Atomic physicists implement laser cooling as a laboratory technique without always framing it in Maxwell's demon / entropy reduction thermodynamics; thermodynamicists who study Maxwell's demon rarely consider laser cooling as a physical realisation. The quantum thermodynamics connection (information gain from Doppler-selective absorption reducing entropy) is rarely made explicit.\n",
      "translation_table": [
        {
          "field_a_term": "entropy reduction in atomic motion (thermodynamics)",
          "field_b_term": "reduction of thermal velocity spread by photon absorption (atomic physics)",
          "note": "Laser cooling reduces entropy of translational degrees of freedom below thermal equilibrium"
        },
        {
          "field_a_term": "Maxwell's demon (measurement-based entropy reduction) (thermodynamics)",
          "field_b_term": "Doppler-selective photon absorption (atomic physics)",
          "note": "Atom moving toward beam absorbs preferentially; photon absorption is velocity-selective measurement"
        },
        {
          "field_a_term": "Carnot-like efficiency limit (thermodynamics)",
          "field_b_term": "Doppler cooling limit T_D = hbar*Gamma / (2 k_B) (atomic physics)",
          "note": "Fundamental quantum noise (spontaneous emission recoil) sets the minimum achievable temperature"
        },
        {
          "field_a_term": "entropy export via radiation (thermodynamics)",
          "field_b_term": "spontaneous emission of photons in random directions (atomic physics)",
          "note": "Low-entropy laser photons absorbed; high-entropy (random direction) photons emitted; net entropy export"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.57.1688",
          "note": "Chu et al. (1985) - three-dimensional viscous confinement and cooling of atoms; optical molasses"
        },
        {
          "doi": "10.1103/RevModPhys.70.707",
          "note": "Phillips (1998) - Nobel Lecture; laser cooling and trapping of neutral atoms"
        },
        {
          "doi": "10.1103/PhysRevA.20.1521",
          "note": "Wineland & Itano (1979) - laser cooling of atoms; thermodynamic framework"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physics-thermodynamics/b-laser-cooling-doppler-optical-molasses.yaml"
    },
    {
      "id": "b-lymphatic-drainage-interstitial-fluid",
      "title": "Lymphatic capillary drainage of interstitial fluid is governed by Starling's revised principle: the balance of oncotic and hydrostatic pressures across the capillary wall drives net filtration that lymphatics must absorb, with lymphatic pumping modeled as a pressure-flow relationship analogous to fluid mechanics in compliant vessel networks\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Interstitial fluid homeostasis obeys the revised Starling equation J_v/A = L_p[(P_c - P_i) - σ(π_c - π_i)] where L_p is hydraulic conductivity, P_c and P_i are capillary and interstitial hydrostatic pressures, and π_c and π_i are colloid oncotic pressures; lymphatics drain this filtrate by generatin",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-starling-oncotic-reversal-lymphatic-dependence"
      ],
      "communication_gap": "Physiologists study lymphatic function through clinical observations of lymphedema and animal lymph flow measurements while fluid mechanicists model flow in vessels and porous media; quantitative models of lymphatic pumping using fluid mechanics tools (FSI, peristaltic pump theory) are largely confined to biofluid mechanics journals not read by physiologists.\n",
      "translation_table": [
        {
          "field_a_term": "Starling equilibrium at capillary wall (physiology)",
          "field_b_term": "boundary condition for interstitial fluid pressure from Darcy flow through tissue (fluid mechanics)",
          "note": "Tissue interstitium is modeled as a porous medium; Darcy's law with Starling source term describes interstitial flow"
        },
        {
          "field_a_term": "lymphangion contraction cycle (physiology)",
          "field_b_term": "peristaltic pump driven by stretch-activated smooth muscle oscillations (fluid mechanics)",
          "note": "Lymphangions exhibit Frank-Starling behavior: contraction frequency and stroke volume increase with filling pressure"
        },
        {
          "field_a_term": "interstitial fluid pressure P_i (physiology)",
          "field_b_term": "pressure field in a porous medium coupled to lymphatic outlet boundary (fluid mechanics)",
          "note": "P_i is ~-3 to -1 mmHg subatmospheric in normal tissue; edema occurs when P_i rises above ~0 mmHg"
        },
        {
          "field_a_term": "lymphedema (physiology)",
          "field_b_term": "fluid accumulation due to lymphatic pump failure in a compliant porous medium (fluid mechanics)",
          "note": "Lymphedema is modeled as failure of the lymphatic pressure-flow relationship to keep pace with Starling filtration"
        }
      ],
      "references": [
        {
          "doi": "10.1152/ajpheart.00554.2009",
          "note": "Scallan et al. (2010) - interstitial fluid pressure: the revised Starling principle"
        },
        {
          "doi": "10.1098/rsif.2017.0015",
          "note": "Margaris & Black (2012) - modelling the lymphatic system: challenges and opportunities"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/physiology-fluid-mechanics/b-lymphatic-drainage-interstitial-fluid.yaml"
    },
    {
      "id": "b-prospect-theory-loss-aversion",
      "title": "Prospect theory is the psychophysical analog of the Weber-Fechner law applied to monetary outcomes — the value function v(x) is the S-shaped transducer mapping objective monetary changes to subjective utility, with loss aversion (λ ≈ 2.25) encoding the asymmetric steepness for losses versus gains.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Kahneman & Tversky's prospect theory (1979) replaces expected utility theory with a psychophysically grounded model of decision under uncertainty. The model has two components: a value function v(x) over outcomes and a probability weighting function π(p) over probabilities.\nThe value function is the",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-prospect-theory-neural-encoding"
      ],
      "communication_gap": "Psychophysics (loudness, brightness, pain perception) and economics (utility theory) have developed in parallel since Fechner (1860) and Bernoulli (1738) but rarely interact formally. Kahneman & Tversky noted the Weber-Fechner connection informally in 1979 but the formal mathematical equivalence has not been fully developed. Behavioral economists rarely cite psychophysics literature; psychophysicists rarely cite prospect theory. The neural implementation — where both should converge — is studied by neuroeconomics but without full integration of classical psychophysics.\n",
      "translation_table": [
        {
          "field_a_term": "objective monetary outcome x (gain or loss)",
          "field_b_term": "physical stimulus intensity I in psychophysics"
        },
        {
          "field_a_term": "subjective value v(x) = x^α (gains), -λ(-x)^β (losses)",
          "field_b_term": "perceived sensation L = k·log(I) (Weber-Fechner) or L = k·I^n (Stevens)"
        },
        {
          "field_a_term": "reference point (status quo, purchase price)",
          "field_b_term": "adaptation level / background stimulus in psychophysics"
        },
        {
          "field_a_term": "loss aversion coefficient λ ≈ 2.25",
          "field_b_term": "asymmetric gain/loss sensitivity (no direct psychophysical analog)"
        },
        {
          "field_a_term": "probability weighting function π(p)",
          "field_b_term": "psychophysical compression of the probability dimension"
        },
        {
          "field_a_term": "overweighting of small probabilities π(p) > p for p small",
          "field_b_term": "detection threshold effects in psychophysics (near-zero intensities feel larger)"
        },
        {
          "field_a_term": "risk aversion for gains (buy insurance)",
          "field_b_term": "concavity of value function for positive outcomes"
        },
        {
          "field_a_term": "risk seeking for losses (gamble to break even)",
          "field_b_term": "convexity of value function for negative outcomes"
        },
        {
          "field_a_term": "certainty effect (overvaluing certain vs. probable gains)",
          "field_b_term": "unit probability anchoring (p=1 maps to π=1, creating discontinuity)"
        }
      ],
      "references": [
        {
          "doi": "10.2307/1914185",
          "note": "Kahneman & Tversky (1979) Econometrica 47:263 — original prospect theory paper"
        },
        {
          "doi": "10.1007/BF00122574",
          "note": "Tversky & Kahneman (1992) J Risk Uncertain 5:297 — cumulative prospect theory"
        },
        {
          "note": "Weber (1834) De Pulsu, Resorptione, Auditu et Tactu — original Weber's law"
        },
        {
          "doi": "10.1016/0167-2681(80)90051-7",
          "note": "Thaler (1980) J Econ Behav Organ 1:39 — mental accounting and loss aversion applications"
        },
        {
          "doi": "10.1016/j.neuron.2009.09.019",
          "note": "Rangel, Camerer & Montague (2008) Nature Neurosci — neuroeconomics review"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/psychology-economics/b-prospect-theory-loss-aversion.yaml"
    },
    {
      "id": "b-agent-based-surrogate-x-intervention-optimization",
      "title": "Agent-based simulation surrogates bridge mechanistic public-health modeling and machine-learned intervention optimization.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy (to be empirically validated): Learned surrogates of expensive agent-based epidemic simulations can support policy search similarly to reduced-form intervention response surfaces in operational planning.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-agent-surrogate-optimization-reduces-intervention-regret"
      ],
      "communication_gap": "Domain operators prioritize interpretable reliability diagnostics, while ML work often prioritizes aggregate accuracy without deployment-grade uncertainty audits.",
      "translation_table": [
        {
          "field_a_term": "model prior",
          "field_b_term": "domain prior",
          "note": "Both constrain inference in data-sparse regimes."
        },
        {
          "field_a_term": "uncertainty estimate",
          "field_b_term": "risk-aware decision support",
          "note": "Uncertainty quality determines practical utility."
        },
        {
          "field_a_term": "out-of-distribution behavior",
          "field_b_term": "deployment robustness",
          "note": "Shift sensitivity governs real-world reliability."
        }
      ],
      "references": [
        {
          "url": "https://www.who.int/",
          "note": "Outbreak response policy context."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/public-health-machine-learning/b-agent-based-surrogate-x-intervention-optimization.yaml"
    },
    {
      "id": "b-epidemiological-aging-demographic-frailty",
      "title": "Epidemiological aging patterns — mortality acceleration with age following the Gompertz-Makeham law — are quantitatively explained by the demographic frailty model from biostatistics: unobserved individual frailty (a gamma-distributed random effect) acting multiplicatively on a baseline hazard produces apparent population-level deceleration of mortality at extreme old age, with the same mathematical structure as the mixture-distribution models used in survival analysis",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Vaupel's frailty model shows that if individual mortality hazard is h_i(t) = z_i * h_0(t) where z_i is gamma-distributed frailty (mean 1, variance sigma^2), then the observed (marginal) population hazard h_bar(t) = h_0(t) / (1 + sigma^2 * H_0(t)) — this decelerates at old ages relative to h_0, even ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-frailty-model-mortality-deceleration-test"
      ],
      "communication_gap": "Demographers and epidemiologists use Gompertz mortality models and actuarial tables while biostatisticians develop frailty survival models; the Vaupel frailty model bridges them but is still not routinely applied in clinical aging cohort analysis where the simpler Cox model without random effects is preferred.",
      "translation_table": [
        {
          "field_a_term": "individual frailty z_i (demographic frailty model)",
          "field_b_term": "random effect in mixed-effects survival model / gamma mixing variable (statistics)",
          "note": "Frailty is a multiplicative random effect on baseline hazard; mathematically equivalent to gamma-Poisson compounding"
        },
        {
          "field_a_term": "population mortality deceleration at extreme age (public health)",
          "field_b_term": "selection bias from heterogeneous mixing distribution at late event times (statistics)",
          "note": "Apparent deceleration arises from selective survival of robust (low-z) individuals; same as frail dropouts in clinical trials"
        },
        {
          "field_a_term": "Gompertz-Makeham baseline hazard h_0(t) = A*e^{bt} + C (epidemiology)",
          "field_b_term": "Weibull or exponential baseline in Cox proportional hazards model (statistics)",
          "note": "Gompertz is the canonical aging hazard baseline; Cox PH frailty model accommodates it identically"
        },
        {
          "field_a_term": "heterogeneity variance sigma^2 of frailty distribution (public health)",
          "field_b_term": "overdispersion parameter in negative binomial / variance-mean ratio (statistics)",
          "note": "Frailty variance sigma^2 quantifies population heterogeneity; higher sigma^2 produces stronger late-life deceleration"
        }
      ],
      "references": [
        {
          "doi": "10.2307/2061343",
          "note": "Vaupel et al. (1979) - the impact of heterogeneity in individual frailty on the dynamics of mortality (foundational frailty model)"
        },
        {
          "doi": "10.1146/annurev-publhealth-031816-044524",
          "note": "Aalen et al. (2015) - time-dependent hazard ratio: modeling and hypothesis testing with application to mortality"
        },
        {
          "doi": "10.1093/biomet/73.1.13",
          "note": "Clayton (1978) - a model for association in bivariate life tables and its application in epidemiological studies"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/public-health-statistics/b-epidemiological-aging-demographic-frailty.yaml"
    },
    {
      "id": "b-quantum-biology-navigation",
      "title": "Migratory birds navigate using quantum entanglement in cryptochrome — the radical-pair mechanism is a room-temperature quantum sensor inside a living protein, operating at the precision limit set by quantum Fisher information.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The magnetic compass of migratory songbirds is not a classical ferromagnetic sensor (like a compass needle) but a quantum device: photo-excited electron transfers in the flavin-adenine dinucleotide (FAD) cofactor of cryptochrome create radical pairs whose spin states are entangled and coherent, and ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Quantum physicists and sensory biologists occupy completely separate conference circuits (APS/quantum-optics meetings vs. behavioural-biology and ornithology conferences). The mathematical language of the Cramér-Rao bound, quantum Fisher information, and Lindblad dynamics is inaccessible to most biologists; the specifics of cryptochrome genetics, radical-lifetime biochemistry, and avian retinal anatomy are inaccessible to most physicists. Grant panels for quantum biology remain rare; the field sits between BBSRC/NSF-biology and EPSRC/NSF-physics without a clear home in either.\n",
      "translation_table": [
        {
          "field_a_term": "radical pair (quantum chemistry)",
          "field_b_term": "magnetoreceptor state (sensory biology)",
          "note": "The physical substrate of the compass — two unpaired electrons in a protein"
        },
        {
          "field_a_term": "singlet-triplet ratio",
          "field_b_term": "cryptochrome signalling state",
          "note": "The quantum output that determines which downstream protein is activated"
        },
        {
          "field_a_term": "quantum Fisher information / Cramér-Rao bound",
          "field_b_term": "minimum detectable field angle",
          "note": "The theoretical precision limit — biology appears to approach it"
        },
        {
          "field_a_term": "decoherence time T2",
          "field_b_term": "cryptochrome radical lifetime",
          "note": "How long quantum coherence survives — must exceed ~1 microsecond for field sensitivity"
        },
        {
          "field_a_term": "spin-selective recombination",
          "field_b_term": "light-dependent magnetic field effect on behaviour",
          "note": "The mechanism linking quantum spin physics to measurable bird behaviour"
        },
        {
          "field_a_term": "Hamiltonian engineering (quantum control)",
          "field_b_term": "cryptochrome protein evolution",
          "note": "Evolution has \"engineered\" the protein scaffold to maximise quantum sensing performance"
        }
      ],
      "references": [
        {
          "arxiv": "2206.07355",
          "note": "Driven spin dynamics enhances cryptochrome magnetoreception — bio-ph harvest 2026-05-04"
        },
        {
          "arxiv": "2401.02923",
          "note": "Optimality of the radical-pair quantum compass — Cramér-Rao bound analysis"
        },
        {
          "doi": "10.1038/nature10140",
          "note": "Ritz et al. 2011 — behavioural evidence for radical-pair compass in birds"
        },
        {
          "doi": "10.1073/pnas.0904851106",
          "note": "Wiltschko et al. 2009 — cryptochrome as the magnetoreceptor"
        }
      ],
      "last_reviewed": "2026-05-04",
      "file": "cross-domain/quantum-biology/b-quantum-biology-navigation.yaml"
    },
    {
      "id": "b-qaoa-x-classical-surrogate-combinatorial-optimization",
      "title": "Quantum approximate optimization algorithms bridge discrete combinatorial optimization with classical surrogate warm-start and benchmarking workflows.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Established baseline literature maps QAOA-style parameterized quantum circuits onto classical optimization landscapes; related speculative analogy (deployment-dependent): classical surrogate models trained on QAOA expectation traces may accelerate warm starts similarly to classical hyperparameter me",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-qaoa-parameter-transfer-improves-surrogate-warm-starts"
      ],
      "communication_gap": "Quantum hardware teams publish physics-centric calibration while classical optimization and ML venues rarely standardize cross-domain reporting of noise-aware surrogate error.",
      "translation_table": [
        {
          "field_a_term": "variational circuit parameters",
          "field_b_term": "surrogate model inputs",
          "note": "Parameter-response curves can seed classical regressors."
        },
        {
          "field_a_term": "expectation estimates / shots",
          "field_b_term": "noisy labels for supervised surrogates",
          "note": "Sampling noise couples statistical ML issues with quantum estimation."
        },
        {
          "field_a_term": "circuit depth schedule",
          "field_b_term": "curriculum / continuation schedule",
          "note": "Both trade bias-variance along an outer optimization loop."
        }
      ],
      "references": [
        {
          "arxiv": "1411.4028",
          "note": "Quantum Approximate Optimization Algorithm (Farhi et al.)."
        }
      ],
      "last_reviewed": "2026-05-09",
      "file": "cross-domain/quantum-computing-computer-science/b-qaoa-x-classical-surrogate-combinatorial-optimization.yaml"
    },
    {
      "id": "b-qkd-information-theoretic-security",
      "title": "Quantum key distribution achieves information-theoretic security (unconditional security independent of adversary computing power) by exploiting quantum measurement disturbance, bridging quantum computing and cryptography through the quantum no-cloning theorem and Shannon's one-time pad.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "BB84 quantum key distribution achieves information-theoretic security (proven secure against computationally unbounded adversaries) because any eavesdropping measurement on quantum states introduces detectable disturbance by the quantum no-cloning theorem; the resulting secure key rate r_key = H(X|E",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-qkd-satellite-global-scale-feasibility"
      ],
      "communication_gap": "Quantum information researchers develop QKD protocols and security proofs while cryptographers develop classical security models; the transition from information-theoretic security proofs to practical QKD implementations (accounting for side channels, finite key effects, device imperfections) requires expertise in both fields that is rarely combined.\n",
      "translation_table": [
        {
          "field_a_term": "quantum channel QBER (quantum error rate) (quantum computing)",
          "field_b_term": "mutual information I(X;E) between key and eavesdropper (information theory)",
          "note": "Higher QBER increases eavesdropper information; security requires QBER < 11% for BB84"
        },
        {
          "field_a_term": "quantum no-cloning theorem (quantum physics)",
          "field_b_term": "lower bound on disturbance from any measurement (cryptography)",
          "note": "No-cloning prevents copying quantum states; any measurement disturbs the state detectably"
        },
        {
          "field_a_term": "privacy amplification (QKD protocol)",
          "field_b_term": "information-theoretic hashing to reduce eavesdropper information (cryptography)",
          "note": "Universal hash families extract a shorter key with negligible eavesdropper information"
        },
        {
          "field_a_term": "BB84 protocol / E91 entanglement-based QKD (quantum computing)",
          "field_b_term": "key agreement with information-theoretic security (cryptography)",
          "note": "Both achieve Shannon-secure key distribution; E91 security proven via violation of Bell inequalities"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF00191318",
          "note": "Bennett & Brassard (1984/2014) - quantum cryptography, public key distribution and coin tossing"
        },
        {
          "doi": "10.1103/RevModPhys.74.145",
          "note": "Gisin et al. (2002) - quantum cryptography review; information-theoretic security proofs"
        },
        {
          "doi": "10.1126/science.abb0525",
          "note": "Chen et al. (2021) - twin-field QKD over 511 km of optical fiber"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/quantum-computing-cryptography/b-qkd-information-theoretic-security.yaml"
    },
    {
      "id": "b-quantum-error-threshold-fault-tolerant-computing",
      "title": "The quantum fault-tolerance threshold theorem connects quantum error correction to information theory: if the physical error rate per gate p is below a threshold p_th (typically ~1% for surface codes), arbitrarily long quantum computations can be performed reliably by concatenating error-correcting codes, with overhead growing only polylogarithmically in computation length.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "For a concatenated code of level k with physical error rate p and threshold p_th, the logical error rate scales as p_L = p_th·(p/p_th)^{2^k}. Each level of concatenation doubles the exponent, so after k = log₂(log(1/ε)/log(1/p_th)) levels, logical error rate drops below ε. For a surface code on a 2D",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-topological-quantum-codes-optimal-threshold-constant-overhead"
      ],
      "communication_gap": "Quantum computing engineers designing error correction hardware and information theorists studying channel capacity share the threshold theorem framework, but the full LDPC-to-quantum-LDPC translation (especially for finite-rate codes) was only achieved in 2022 (Panteleev-Kalachev), showing a persistent gap between classical coding theory and quantum error correction despite shared mathematics.\n",
      "translation_table": [
        {
          "field_a_term": "physical qubit error rate p (quantum computing)",
          "field_b_term": "noise rate below threshold / subcritical disorder (information theory)",
          "note": "The fault-tolerance threshold is analogous to a percolation or Ising critical point"
        },
        {
          "field_a_term": "fault-tolerance threshold p_th (quantum computing)",
          "field_b_term": "critical error rate / channel capacity threshold (information theory)",
          "note": "Below p_th, error correction wins over error accumulation; above it, errors dominate"
        },
        {
          "field_a_term": "logical qubit encoded in surface code (quantum computing)",
          "field_b_term": "codeword in quantum error-correcting code (information theory)",
          "note": "The surface code is a topological code where logical operators are non-contractible loops"
        },
        {
          "field_a_term": "syndrome measurement (quantum computing)",
          "field_b_term": "parity check matrix measurement (information theory)",
          "note": "Syndrome extraction is the quantum analog of classical syndrome decoding in LDPC codes"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevA.52.R2493",
          "note": "Shor (1995) - fault-tolerant quantum computation (original threshold concept)"
        },
        {
          "doi": "10.1103/PhysRevLett.85.910",
          "note": "Knill et al. (2001) - threshold accuracy for quantum computation"
        },
        {
          "doi": "10.1103/PhysRevA.86.032324",
          "note": "Wang et al. (2012) - surface code quantum computing (practical threshold ~1%)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/quantum-computing-error-correction/b-quantum-error-threshold-fault-tolerant-computing.yaml"
    },
    {
      "id": "b-stabilizer-codes-fault-tolerance",
      "title": "Quantum stabilizer codes are the quantum analog of classical linear codes — the threshold theorem proves that fault-tolerant quantum computation is achievable when physical error rates fall below approximately 1%.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Quantum error correction (Shor 1995, Steane 1996) maps directly onto classical coding theory: a [[n, k, d]] quantum code encodes k logical qubits into n physical qubits with code distance d (able to correct ⌊(d−1)/2⌋ arbitrary errors). Stabilizer codes — defined by a commuting set of Pauli operators",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-surface-code-practical-threshold-2030"
      ],
      "communication_gap": "Quantum computing and classical coding theory are taught in separate departments (physics vs electrical engineering/computer science). The CSS construction and stabilizer formalism require both quantum mechanics and group theory, limiting practitioners who can work across the bridge. The threshold theorem is widely cited but rarely understood in detail outside quantum information theory.\n",
      "translation_table": [
        {
          "field_a_term": "[n, k, d] classical linear code",
          "field_b_term": "[[n, k, d]] quantum stabilizer code",
          "note": "Direct analog — CSS construction maps classical to quantum codes"
        },
        {
          "field_a_term": "parity check matrix H (classical)",
          "field_b_term": "stabilizer generators (Pauli operators)",
          "note": "Syndrome measurement = parity check in the quantum case"
        },
        {
          "field_a_term": "bit flip (0→1) and phase flip error types",
          "field_b_term": "Pauli X (bit flip) and Pauli Z (phase flip) errors",
          "note": "Quantum errors decompose into X, Z, Y = iXZ"
        },
        {
          "field_a_term": "classical code rate k/n",
          "field_b_term": "quantum code rate k/n (logical qubits per physical qubit)",
          "note": "Surface code rate = 1/n → 0 as n grows (overhead for fault tolerance)"
        },
        {
          "field_a_term": "minimum distance d (classical)",
          "field_b_term": "code distance d (quantum) — number of correctable errors = ⌊(d−1)/2⌋",
          "note": "Distance determines fault tolerance threshold"
        }
      ],
      "references": [
        {
          "note": "Shor (1995) Scheme for reducing decoherence in quantum computer memory. Phys Rev A 52:R2493"
        },
        {
          "note": "Steane (1996) Error correcting codes in quantum theory. Phys Rev Lett 77:793"
        },
        {
          "note": "Gottesman (1997) Stabilizer codes and quantum error correction. PhD thesis, Caltech"
        },
        {
          "note": "Fowler et al. (2012) Surface codes: towards practical large-scale quantum computation. Phys Rev A 86:032324"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/quantum-computing-error-correction/b-stabilizer-codes-fault-tolerance.yaml"
    },
    {
      "id": "b-continuous-time-qwalk-x-grover-spatial-search-geometry",
      "title": "Continuous-time quantum walks on graphs underpin spatial-search constructions where marked vertices couple as potential shifts — embedding Grover-type quadratic speedups into Laplacian spectral geometry while preserving caveats about optimality on arbitrary graphs versus structured Johnson/hypercube families.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Childs & Goldstone showed spatial search via continuous-time quantum walk locates a marked vertex on several graph families in O(√N) time by tuning a Hamiltonian built from the graph Laplacian plus a rank-one oracle term; this parallels Grover's diffusion operator construction over the equal superpo",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-johnson-graph-spectral-gap-predicts-ctqw-search-plateau"
      ],
      "communication_gap": "Quantum algorithms courses emphasize oracle Grover circuits while quantum transport papers emphasize Hamiltonian walks on physical graphs — engineers implementing searches must reconcile oracle compilation costs vs continuous analog protocols.\n",
      "translation_table": [
        {
          "field_a_term": "graph Laplacian L as kinetic generator of CTQW",
          "field_b_term": "diffusion / mixing operator structure in Grover's algorithm",
          "note": "Both exploit spectral gap and rotation in a low-dimensional invariant subspace for hit probability buildup."
        },
        {
          "field_a_term": "oracle Hamiltonian / marked-site potential",
          "field_b_term": "phase flip / oracle marking in Grover iterate",
          "note": "Formal roles align at high level; gate-model resource accounting differs."
        },
        {
          "field_a_term": "Johnson graph / hypercube special-structure proofs",
          "field_b_term": "optimal √N scaling regimes for structured search problems",
          "note": "Optimality is graph-dependent; arbitrary-graph spatial search can fail to match Grover speedups without extra structure."
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevA.70.022314",
          "note": "Childs & Goldstone (2004) — Spatial search by quantum walk; Phys Rev A 70:022314."
        },
        {
          "doi": "10.1103/PhysRevLett.79.325",
          "note": "Grover (1997) — quantum mechanics speeds database search; Phys Rev Lett 79:325."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/quantum-computing-probability/b-continuous-time-qwalk-x-grover-spatial-search-geometry.yaml"
    },
    {
      "id": "b-quantum-annealing-simulated-annealing",
      "title": "Quantum annealing replaces thermal fluctuations with quantum tunneling: the transverse-field Ising model H=-Γ(t)Σσᵢˣ - J·Σσᵢᶻσⱼᶻ maps optimization onto adiabatic quantum evolution, generalizing simulated annealing",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Simulated annealing (SA) solves combinatorial optimization by sampling from the Boltzmann distribution P(s) ∝ exp(-E(s)/T), decreasing T to concentrate probability on the minimum. Quantum annealing (QA) adds a transverse field Γ(t): H(t) = -Γ(t)·Σᵢσᵢˣ - Σᵢⱼ Jᵢⱼσᵢᶻσⱼᶻ. At t=0, Γ large: ground state i",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-quantum-annealing-simulated-annealing"
      ],
      "communication_gap": "Operations researchers who use simulated annealing for scheduling and logistics optimization are largely unaware of the mathematical connection to quantum annealing and the Ising model. Quantum computing researchers working on QA often present it as a hardware problem without connection to the classical combinatorics literature where problem hardness (phase transitions, typical-case complexity) is well-characterized.\n",
      "translation_table": [
        {
          "field_a_term": "simulated annealing temperature schedule T(t)",
          "field_b_term": "quantum annealing transverse field schedule Γ(t)",
          "note": "Both are 'annealing schedules' driving a phase transition from disordered to ordered state"
        },
        {
          "field_a_term": "thermal excitation over energy barrier ΔE (classical)",
          "field_b_term": "quantum tunneling through energy barrier — WKB tunneling rate",
          "note": "Quantum tunneling rate ∝ exp(-√(m·ΔE)·W/ℏ) vs thermal rate exp(-ΔE/T)"
        },
        {
          "field_a_term": "combinatorial optimization problem QUBO: min xᵀQx",
          "field_b_term": "Ising model energy E = Σᵢⱼ Jᵢⱼσᵢσⱼ + Σᵢhᵢσᵢ",
          "note": "All QUBO problems map to Ising with J_ij = Q_ij/4, h_i from diagonal Q"
        },
        {
          "field_a_term": "D-Wave 5000Q hardware graph connectivity",
          "field_b_term": "Chimera/Pegasus graph structure limiting which Jᵢⱼ can be nonzero",
          "note": "Problem sparsity must match hardware graph or minor embedding adds overhead qubits"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.220.4598.671",
          "note": "Kirkpatrick, Gelatt & Vecchi (1983) Optimization by simulated annealing. Science 220:671"
        },
        {
          "doi": "10.1126/science.284.5415.779",
          "note": "Kadowaki & Nishimori (1998) Quantum annealing in the transverse Ising model. Phys Rev E 58:5355"
        },
        {
          "doi": "10.1038/s41586-018-0170-0",
          "note": "Hauke et al. (2020) Perspectives of quantum annealing. Rep Prog Phys 83:054401"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/quantum-computing-probability/b-quantum-annealing-simulated-annealing.yaml"
    },
    {
      "id": "b-quantum-walks-random-walks",
      "title": "Quantum walks generalize classical random walks by allowing quantum superposition of paths, achieving quadratically faster spreading (sigma ~ t vs t^1/2) and providing the computational primitive for quantum speedup in graph algorithms.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The discrete-time quantum walk on a line replaces the classical coin flip (probability distribution P(x,t) satisfying the diffusion equation) with a unitary coin operator C acting on a qubit; the resulting interference between amplitudes produces ballistic spreading sigma(t) ~ t (vs diffusive sigma ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-quantum-walk-spatial-search-optimal"
      ],
      "communication_gap": "Probabilists developing Markov chain mixing time theory and quantum information theorists studying quantum algorithms use parallel mathematical structures but rarely collaborate; the Szegedy quantization theorem (2004) that formally derives quantum walk speedup from the classical spectral gap is known to quantum computing experts but not widely taught in probability courses.\n",
      "translation_table": [
        {
          "field_a_term": "position probability distribution P(x,t) (classical random walk)",
          "field_b_term": "quantum amplitude psi(x,t) with P = |psi|^2 (quantum walk)",
          "note": "Amplitudes interfere; the absolute square gives probabilities, enabling coherent speedup"
        },
        {
          "field_a_term": "transition matrix T (random walk)",
          "field_b_term": "unitary walk operator U = S*(C⊗I) (quantum walk)",
          "note": "U replaces stochastic T; unitarity ensures probability conservation without decoherence"
        },
        {
          "field_a_term": "mixing time / hitting time (random walk)",
          "field_b_term": "quantum walk speedup: O(sqrt(N)) vs O(N) hitting time",
          "note": "Quadratic speedup arises from ballistic vs diffusive spreading of quantum amplitudes"
        },
        {
          "field_a_term": "Markov chain stationary distribution (probability)",
          "field_b_term": "quantum walk eigenstate / uniform superposition (quantum computing)",
          "note": "Quantum walks lack a stationary distribution (unitary dynamics); approximating eigenstates is the analog"
        }
      ],
      "references": [
        {
          "doi": "10.48550/arXiv.quant-ph/0012090",
          "note": "Aharonov et al. (2001) - quantum walks on graphs, original discrete-time model"
        },
        {
          "doi": "10.1145/1060590.1060639",
          "note": "Szegedy (2004) - quantum speed-up of Markov chain based algorithms"
        },
        {
          "doi": "10.1103/PhysRevLett.102.180501",
          "note": "Childs et al. (2009) - universal computation by multi-particle quantum walk"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/quantum-computing-probability/b-quantum-walks-random-walks.yaml"
    },
    {
      "id": "b-topological-quantum-computing-anyons",
      "title": "Topological quantum computing encodes qubits in non-Abelian anyons — quasiparticle excitations of topological phases whose braiding operations implement quantum gates by exchanging particle worldlines, with error correction guaranteed topologically because qubit states are stored in the globally degenerate ground state subspace inaccessible to local perturbations",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Non-Abelian anyons (e.g., Fibonacci anyons, Majorana zero modes) in 2D topological phases have a braid group representation where exchanging anyons i and j applies a unitary gate U(σ_ij) on the degenerate ground state space; since U depends only on the topology of the worldline braid (not the geomet",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Quantum computing researchers focus on gate fidelity and error rates in physical implementations while topologists develop the mathematical theory of braided tensor categories and anyonic models; translating topological invariant calculations (topological quantum dimension, F-matrices, R-matrices) into practical quantum circuit design requires bridging abstract category theory and experimental physics.",
      "translation_table": [
        {
          "field_a_term": "non-Abelian anyon braiding (quantum computing)",
          "field_b_term": "non-Abelian representation of braid group B_n (topology)",
          "note": "Each anyon exchange σ_i acts as a matrix in the topological Hilbert space; B_n generators satisfy braid relations σ_i*σ_{i+1}*σ_i = σ_{i+1}*σ_i*σ_{i+1}"
        },
        {
          "field_a_term": "topological qubit ground state degeneracy (quantum computing)",
          "field_b_term": "topological invariant (genus of surface / anyonic fusion space dimension) (topology)",
          "note": "Ground state degeneracy of fractional quantum Hall states on a torus = (topological quantum dimension)^2 of anyons"
        },
        {
          "field_a_term": "topological quantum gate via anyon fusion path (quantum computing)",
          "field_b_term": "path-independent holonomy in fiber bundle over anyon configuration space (topology)",
          "note": "Gate matrix depends only on homotopy class of braiding path; analogous to Berry phase holonomy"
        },
        {
          "field_a_term": "Majorana zero mode as topological qubit (quantum computing)",
          "field_b_term": "zero-energy eigenstate of Bogoliubov-de Gennes Hamiltonian at topological phase boundary (topology)",
          "note": "Majorana modes localize at interfaces between topological and trivial superconductor phases; braiding implements non-Abelian gate"
        }
      ],
      "references": [
        {
          "doi": "10.1103/RevModPhys.80.1083",
          "note": "Nayak et al. (2008) - non-Abelian anyons and topological quantum computation"
        },
        {
          "doi": "10.1103/PhysRevLett.86.268",
          "note": "Kitaev (2001) - unpaired Majorana fermions in quantum wires"
        },
        {
          "doi": "10.1016/j.aop.2007.04.007",
          "note": "Freedman et al. (2003) - topological quantum computation with Fibonacci anyons"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/quantum-computing-topology/b-topological-quantum-computing-anyons.yaml"
    },
    {
      "id": "b-quantum-coherence-photosynthesis",
      "title": "Femtosecond spectroscopy reveals long-lived quantum coherence in the Fenna-Matthews-Olson (FMO) light-harvesting complex — energy transfer occurs via quantum superposition across chromophores rather than classical Förster hopping, and the same Lindblad master equation formalism that governs qubit decoherence in quantum computing describes coherence loss in biological light-harvesting at physiological temperatures.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "contested",
      "bridge_claim": "In 2007, Engel et al. (Nature 446:782) used two-dimensional electronic spectroscopy (2DES) at 77 K and 277 K to observe oscillatory cross-peaks in the FMO complex of green sulfur bacteria (Chlorobaculum tepidum) with coherence lifetimes ~ 660 fs at 77 K and visible even at physiological temperatures",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-fmo-enaqt-efficiency"
      ],
      "communication_gap": "Quantum information theorists and quantum chemists use overlapping but distinct formalism (Lindblad vs Redfield vs HEOM) with different default approximations. Photosynthesis biophysicists and quantum computing physicists rarely co-author; the 2007 Engel et al. paper was written by a physical chemistry group (Fleming, UC Berkeley) that bridges both worlds, but most photosynthesis researchers and most quantum computing researchers remain unaware of each other's literature on coherence engineering.\n",
      "translation_table": [
        {
          "field_a_term": "qubit (two-level quantum system)",
          "field_b_term": "bacteriochlorophyll chromophore (two-level excitonic system)"
        },
        {
          "field_a_term": "T₂ dephasing time (quantum computing)",
          "field_b_term": "electronic coherence lifetime in FMO (~660 fs at 77 K)"
        },
        {
          "field_a_term": "Lindblad dissipator (open quantum system)",
          "field_b_term": "protein scaffold phonon bath (Debye-Waller dephasing)"
        },
        {
          "field_a_term": "quantum gate fidelity (quantum computing)",
          "field_b_term": "excitation transfer efficiency to reaction centre (photosynthesis)"
        },
        {
          "field_a_term": "environment-assisted quantum transport (ENAQT)",
          "field_b_term": "vibration-assisted energy transfer in light-harvesting complexes"
        },
        {
          "field_a_term": "quantum walk on graph (algorithms)",
          "field_b_term": "exciton diffusion on BChl network (FMO topology)"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature05678",
          "note": "Engel et al. (2007) Evidence for wavelike energy transfer through quantum coherence in photosynthetic systems, Nature 446:782"
        },
        {
          "doi": "10.1063/1.3155372",
          "note": "Ishizaki & Fleming (2009) Unified treatment of quantum coherent and incoherent hopping dynamics in electronic energy transfer, J Chem Phys 130:234111"
        },
        {
          "doi": "10.1103/PhysRevLett.103.146404",
          "note": "Rebentrost et al. (2009) Environment-assisted quantum transport, PRL 103:146404"
        },
        {
          "doi": "10.1021/jp405421d",
          "note": "Scholes et al. (2017) Using coherence to enhance function in chemical and biophysical systems, Nature 543:647"
        },
        {
          "doi": "10.1063/1.3293413",
          "note": "Ishizaki & Fleming (2009) Theoretical examination of quantum coherence in a photosynthetic system at physiological temperature, PNAS 106:17255"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/quantum-physics-biology/b-quantum-coherence-photosynthesis.yaml"
    },
    {
      "id": "b-quantum-tunneling-enzyme-catalysis",
      "title": "Quantum tunneling of protons and electrons contributes to enzyme catalysis beyond classical transition state theory — measured by anomalously large H/D kinetic isotope effects in alcohol dehydrogenase and aromatic amine dehydrogenase — establishing quantum mechanics as a functional component of room-temperature biochemistry.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Quantum tunneling — transmission through a potential energy barrier classically forbidden to a particle — is not merely a curiosity at cryogenic temperatures but a quantitatively significant contributor to enzyme catalysis at physiological temperatures.\n1. Tunneling rate formula. The WKB (Wentzel-Kr",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-protein-dynamics-optimize-quantum-tunneling"
      ],
      "communication_gap": "Quantum physicists and biochemists rarely collaborate directly. Quantum biology is an emerging interdisciplinary field (Al-Khalili & McFadden book 2014) but remains contested. The enzyme tunneling evidence is in well-reviewed experimental biochemistry journals (Annual Review of Biochemistry, Accounts of Chemical Research), not in physics journals, so it is underappreciated in the physics community. Conversely, biologists unfamiliar with quantum mechanics miss the mechanistic explanations provided by WKB theory and PCET formalism.\n",
      "translation_table": [
        {
          "field_a_term": "quantum tunneling rate k_tunnel ∝ exp(-2κd) (quantum mechanics)",
          "field_b_term": "hydrogen transfer rate in enzyme catalysis (biochemistry)",
          "note": "WKB tunneling formula describes the quantum component of proton transfer"
        },
        {
          "field_a_term": "de Broglie wavelength λ = h/√(2mE) (quantum mechanics)",
          "field_b_term": "particle mass dependence of KIE (H vs D vs T) (enzymology)",
          "note": "Heavier isotopes have shorter de Broglie wavelength — less tunneling"
        },
        {
          "field_a_term": "potential energy barrier U(x) in quantum mechanics",
          "field_b_term": "activation barrier in enzyme active site (biochemistry)",
          "note": "The enzyme active site geometry shapes the quantum tunneling barrier"
        },
        {
          "field_a_term": "barrier width d in WKB formula (quantum mechanics)",
          "field_b_term": "proton donor-acceptor distance in enzyme (biochemistry)",
          "note": "Protein conformational dynamics compresses d to optimize tunneling"
        },
        {
          "field_a_term": "transmission coefficient κ(E) in quantum mechanics",
          "field_b_term": "KIE = k_H/k_D ratio (kinetic isotope effect) (enzymology)",
          "note": "KIE > 7 is the experimental hallmark of quantum tunneling in enzymes"
        },
        {
          "field_a_term": "proton-coupled electron transfer (PCET) theory (quantum chemistry)",
          "field_b_term": "oxidoreductase mechanism (hydride/proton transfer enzymes)",
          "note": "PCET theory quantitatively models tunneling in alcohol dehydrogenase"
        }
      ],
      "references": [
        {
          "doi": "10.1021/bi990941v",
          "note": "Scrutton et al. (1999) Biochemistry 38:10844 — tunneling in enzyme-catalyzed hydrogen transfer"
        },
        {
          "doi": "10.1021/ar050201z",
          "note": "Hammes-Schiffer (2006) Acc Chem Res 39:93 — hydrogen tunneling and protein motion in enzyme reactions"
        },
        {
          "doi": "10.1146/annurev-biochem-051710-133623",
          "note": "Klinman & Kohen (2013) Annu Rev Biochem 82:471 — hydrogen tunneling links protein dynamics to enzyme catalysis"
        },
        {
          "doi": "10.1046/j.1432-1033.2002.03018.x",
          "note": "Sutcliffe & Scrutton (2002) Eur J Biochem 269:3096 — a new conceptual framework for enzyme catalysis"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/quantum-physics-biology/b-quantum-tunneling-enzyme-catalysis.yaml"
    },
    {
      "id": "b-exotic-matter-casimir-negative-energy",
      "title": "The Casimir effect demonstrates that quantum vacuum fluctuations between conducting plates produce a measurable attractive force via negative energy density — the same exotic matter with negative energy density that general relativity requires for traversable wormholes and warp drives, making the Casimir effect the only laboratory-scale demonstration of negative energy.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "General relativity permits exotic geometries (traversable wormholes, Alcubierre warp metric) that require regions of negative energy density to satisfy the Einstein field equations. Quantum field theory provides the only known mechanism for negative energy: the Casimir effect, where boundary conditi",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "",
      "translation_table": [
        {
          "field_a_term": "negative energy density rho < 0",
          "field_b_term": "Casimir energy density between plates: U = -pi^2 hbar c / (720 d^4)",
          "note": "The Casimir energy density is negative — it is the only laboratory-accessible example"
        },
        {
          "field_a_term": "exotic matter (GR requirement)",
          "field_b_term": "quantum vacuum in boundary-modified region",
          "note": "GR calls it exotic matter; QFT calls it boundary-modified vacuum — same stress-energy"
        },
        {
          "field_a_term": "energy condition violation (WEC: T_uv u^mu u^nu >= 0)",
          "field_b_term": "Casimir effect violates the weak energy condition",
          "note": "Classical matter satisfies WEC; Casimir vacuum does not — enabling exotic geometries"
        },
        {
          "field_a_term": "Ford-Roman quantum inequality",
          "field_b_term": "constraint on Casimir force magnitude and duration",
          "note": "Sets minimum plate separation for given negative energy — limits wormhole size"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.78.5",
          "note": "Lamoreaux (1997) — demonstration of the Casimir force in the 0.6 to 6 um range"
        },
        {
          "doi": "10.1103/PhysRevD.48.4929",
          "note": "Ford & Roman (1993) — quantum field theory constrains traversable wormhole geometries"
        },
        {
          "doi": "10.1103/PhysRevD.13.2720",
          "note": "Morris & Thorne (1988) — wormholes in spacetime and their use for interstellar travel"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/quantum-physics-cosmology/b-exotic-matter-casimir-negative-energy.yaml"
    },
    {
      "id": "b-quantum-decoherence-einselection",
      "title": "Quantum decoherence selects pointer states through einselection: the preferred basis that survives entanglement with the environment is determined by the system-environment interaction Hamiltonian, explaining the emergence of classical reality from quantum superpositions",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Environment-induced superselection (einselection) identifies pointer states as eigenstates of the system observable that commutes with the system-environment interaction Hamiltonian H_int, explaining why macroscopic objects appear classical: coherence between pointer states decays at rate Gamma ~ (x",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Quantum physicists study decoherence as a dynamical process while information theorists analyze it as information flow; quantum Darwinism connecting objectivity to redundant information encoding in the environment is known in foundations of QM but rarely discussed in quantum information curricula.",
      "translation_table": [
        {
          "field_a_term": "pointer states (preferred basis)",
          "field_b_term": "eigenstates of operator commuting with H_int",
          "note": "Pointer states are robust: they are not scrambled by interaction with environment; determined by symmetry of H_int"
        },
        {
          "field_a_term": "decoherence rate Gamma",
          "field_b_term": "information leakage rate from system to environment",
          "note": "Gamma is the rate at which environment gains information about system state; fast decoherence = fast information transfer"
        },
        {
          "field_a_term": "thermal de Broglie wavelength lambda_th",
          "field_b_term": "coherence length scale for position superpositions",
          "note": "Superpositions of positions |x1> and |x2> decohere at rate proportional to (x1-x2)^2/lambda_th^2"
        },
        {
          "field_a_term": "quantum Darwinism: environment as witness",
          "field_b_term": "mutual information I(S:F) between system and environment fragment F",
          "note": "Classical objectivity emerges when many environment fragments each carry full information about pointer states"
        }
      ],
      "references": [
        {
          "doi": "10.1103/RevModPhys.75.715",
          "note": "Zurek (2003) Rev Mod Phys - decoherence, einselection, and the quantum origins of the classical"
        },
        {
          "doi": "10.1038/nature02544",
          "note": "Schlosshauer (2004) Rev Mod Phys - decoherence and the measurement problem"
        },
        {
          "doi": "10.1038/nphys1202",
          "note": "Zurek (2009) Nat Phys - quantum Darwinism: objectivity from quantum redundancy"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/quantum-physics-information/b-quantum-decoherence-einselection.yaml"
    },
    {
      "id": "b-quantum-error-correction-holography",
      "title": "Quantum error-correcting codes (stabilizer codes, surface codes) and the holographic principle in quantum gravity (AdS/CFT) are the same mathematical structure: bulk operators in AdS are encoded in boundary CFT degrees of freedom via a quantum error-correcting code, with the Ryu-Takayanagi formula (S = A/4G_N) expressing entanglement entropy as a quantum error-correction redundancy statement.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Quantum error correction encodes k logical qubits in n physical qubits with distance d (denoted [[n,k,d]]), such that any error affecting fewer than d/2 qubits can be detected and corrected. The key property is redundancy: the logical information is non-locally distributed across the physical qubits",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ads-cft-quantum-error-correction-island-formula"
      ],
      "communication_gap": "The quantum error correction community (quantum computing, quantum information) and the quantum gravity / string theory community work in largely separate journals (Phys Rev A vs JHEP) with different language and training. Almheiri, Dong, and Harlow's 2015 paper was a landmark bridge, but it is read mainly in the quantum gravity literature. Working quantum error correction engineers building surface code chips are unaware that their code geometry has a gravitational dual. Conversely, string theorists developing the island formula rarely read the quantum error correction engineering literature where fault-tolerance threshold theorems might offer new tools for the information paradox.\n",
      "translation_table": [
        {
          "field_a_term": "Physical qubits (n) in stabilizer code [[n,k,d]]",
          "field_b_term": "Boundary CFT degrees of freedom",
          "note": "The n boundary qubits are the physical storage medium for bulk information"
        },
        {
          "field_a_term": "Logical qubits (k) — encoded information",
          "field_b_term": "Bulk quantum field theory operators in AdS",
          "note": "The k logical qubits are the bulk operators reconstructable from the boundary"
        },
        {
          "field_a_term": "Code distance d (error correction capacity)",
          "field_b_term": "RT minimal surface area (in Planck units)",
          "note": "S = A/4G_N is the code distance: how much boundary must be erased to destroy bulk info"
        },
        {
          "field_a_term": "Erasure error on boundary region A",
          "field_b_term": "Tracing out boundary region A (partial trace)",
          "note": "Code corrects erasure iff bulk is recoverable from complement Ā of the erased region"
        },
        {
          "field_a_term": "Perfect tensor (maximally entangled across bipartitions)",
          "field_b_term": "Local AdS geometry tensor network vertex",
          "note": "Pastawski et al.'s HaPPY code uses [[5,1,3]] perfect tensors as AdS vertices"
        },
        {
          "field_a_term": "Stabilizer generators",
          "field_b_term": "Boundary symmetry generators / modular Hamiltonians",
          "note": "The stabilizer group of the holographic code corresponds to the boundary modular flow"
        },
        {
          "field_a_term": "Quantum Singleton bound (S ≤ n-k)",
          "field_b_term": "Ryu-Takayanagi formula S(A) = Area(γ_A)/4G_N",
          "note": "RT formula is the Singleton bound of the holographic code, saturated by the minimal surface"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.96.181602",
          "note": "Ryu & Takayanagi (2006) Phys Rev Lett 96:181602 — holographic derivation of entanglement entropy; RT formula"
        },
        {
          "doi": "10.1007/JHEP04(2015)163",
          "note": "Almheiri, Dong & Harlow (2015) JHEP 2015:163 — bulk locality and quantum error correction in AdS/CFT"
        },
        {
          "doi": "10.1007/JHEP06(2015)149",
          "note": "Pastawski et al. (2015) JHEP 2015:149 — holographic quantum error-correcting codes; HaPPY code with perfect tensors"
        },
        {
          "arxiv": "hep-th/9711200",
          "note": "Maldacena (1997) Int J Theor Phys 38:1113 — the large N limit of superconformal field theories and supergravity; AdS/CFT"
        },
        {
          "doi": "10.1103/PhysRevD.50.6394",
          "note": "Page (1993) Phys Rev Lett 71:3743 — information in black hole radiation; Page curve"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/quantum-physics-information/b-quantum-error-correction-holography.yaml"
    },
    {
      "id": "b-quantum-gravity-holographic-entropy",
      "title": "The Ryu-Takayanagi formula equates the entanglement entropy of a boundary CFT region to the area of the minimal bulk surface divided by 4G, connecting quantum gravity geometry to quantum information theory through holography",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The holographic entanglement entropy formula S_A = Area(gamma_A) / (4*G_N*hbar) (Ryu-Takayanagi) states that entanglement entropy of boundary region A in a CFT equals the area of the minimal bulk surface gamma_A anchored to partial_A in units of the Planck length, providing a geometric bulk dual of ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "String theorists study holography and the RT formula while quantum information theorists study entanglement entropy; the translation between the geometric (area) and information-theoretic (entropy) languages requires expertise in AdS/CFT rarely taught in QI curricula.",
      "translation_table": [
        {
          "field_a_term": "Von Neumann entanglement entropy S_A = -Tr(rho_A log rho_A)",
          "field_b_term": "area of minimal bulk geodesic surface gamma_A in AdS",
          "note": "RT formula: S_A = min(Area(gamma_A)) / 4G_N; quantum corrections give S = Area/4G + S_bulk"
        },
        {
          "field_a_term": "mutual information I(A:B) = S_A + S_B - S_AB in CFT",
          "field_b_term": "connected vs disconnected bulk minimal surfaces",
          "note": "Mutual information undergoes phase transition when dominant saddle switches from connected to disconnected surface"
        },
        {
          "field_a_term": "quantum error correction in boundary CFT",
          "field_b_term": "bulk reconstruction behind the RT surface",
          "note": "Bulk operators within the entanglement wedge of A can be reconstructed from CFT region A: Rindler-HKLL reconstruction"
        },
        {
          "field_a_term": "black hole information paradox",
          "field_b_term": "Page curve from RT surface transition: information returns after Page time",
          "note": "The island formula (RT with islands) resolves the information paradox: S_rad follows Page curve"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.96.181602",
          "note": "Ryu & Takayanagi (2006) PRL - holographic derivation of entanglement entropy from AdS/CFT"
        },
        {
          "doi": "10.1007/JHEP11(2014)163",
          "note": "Pastawski et al. (2015) JHEP - holographic quantum error-correcting codes from perfect tensors"
        },
        {
          "doi": "10.1007/JHEP08(2019)127",
          "note": "Almheiri et al. (2019) - islands and the entropy of Hawking radiation from the Page curve"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/quantum-physics-information/b-quantum-gravity-holographic-entropy.yaml"
    },
    {
      "id": "b-topological-insulators-band-theory",
      "title": "Topological insulators are materials with insulating bulk but conducting surface states protected by time-reversal symmetry — classified by topological invariants (Z₂, Chern number) from algebraic topology applied to electronic band theory, with applications to fault-tolerant quantum computing via Majorana edge modes.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Topological insulators (TIs) are a phase of matter where the bulk band structure has a non-trivial topological invariant, even though the material is an insulator in the bulk. The topological invariant (Z₂ for time-reversal-invariant TIs; Chern number n ∈ Z for quantum Hall systems) is a global prop",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-topological-qubit-fault-tolerance-threshold"
      ],
      "communication_gap": "Algebraic topology (K-theory, fiber bundles) is rarely taught in condensed matter physics curricula; conversely, band theory and materials synthesis are outside the typical pure mathematics education. The profound result that K-theory classifies all topological phases of matter (the tenfold way, Kitaev 2009) required cross-pollination between abstract mathematics and experimental materials physics that happened over a decade through a handful of key papers.\n",
      "translation_table": [
        {
          "field_a_term": "topological invariant (Z₂, Chern number)",
          "field_b_term": "band structure global phase / topological phase label",
          "note": "A non-zero invariant classifies the band bundle over the Brillouin zone as topologically non-trivial"
        },
        {
          "field_a_term": "bulk-boundary correspondence",
          "field_b_term": "topologically protected surface/edge states",
          "note": "Non-trivial bulk invariant guarantees gapless boundary states robust to disorder"
        },
        {
          "field_a_term": "Majorana zero mode",
          "field_b_term": "non-Abelian anyon for quantum computing",
          "note": "MZMs at TI-superconductor interface enable topological qubit gates via braiding"
        },
        {
          "field_a_term": "K-theory / algebraic topology",
          "field_b_term": "classification of topological phases (10-fold way)",
          "note": "The tenfold way classifies all topological phases by dimensionality and symmetry class"
        },
        {
          "field_a_term": "Berry curvature / Berry phase",
          "field_b_term": "anomalous Hall conductivity; Chern-Simons response",
          "note": "The integrated Berry curvature over the Brillouin zone equals the Chern number"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.95.146802",
          "note": "Kane, C.L. & Mele, E.J. (2005). Z₂ topological order and the quantum spin Hall effect. Phys. Rev. Lett. 95:146802."
        },
        {
          "doi": "10.1126/science.1133734",
          "note": "Bernevig, B.A., Hughes, T.L. & Zhang, S.-C. (2006). Quantum spin Hall effect and topological phase transition in HgTe quantum wells. Science 314:1757."
        },
        {
          "doi": "10.1103/RevModPhys.82.3045",
          "note": "Hasan, M.Z. & Kane, C.L. (2010). Colloquium: Topological insulators. Rev. Mod. Phys. 82:3045."
        },
        {
          "doi": "10.1103/PhysRevLett.100.096407",
          "note": "Fu, L. & Kane, C.L. (2008). Superconducting proximity effect and Majorana fermions at the surface of a topological insulator. Phys. Rev. Lett. 100:096407."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/quantum-physics-materials-science/b-topological-insulators-band-theory.yaml"
    },
    {
      "id": "b-entanglement-tensor-network-states",
      "title": "Quantum entanglement structure in many-body systems is exactly captured by tensor network states (MPS, PEPS, MERA), where the entanglement entropy S ∝ area of a region is encoded as the bond dimension χ of inter-tensor contractions, providing a mathematical framework that connects quantum information geometry to condensed-matter physics",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The entanglement structure of a quantum many-body ground state determines the minimal tensor network representation: for 1D gapped systems the entanglement entropy satisfies area law S(A) ≤ const, which corresponds to a matrix product state (MPS) with bond dimension χ = O(1); for critical systems S(",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Quantum physicists study entanglement in specific Hamiltonians while mathematicians develop tensor network algorithms and contractioncomplexity theory; the connection between the physical entanglement geometry and the mathematical tensor network structure is the foundation of modern quantum simulation but spans both communities without a shared language.",
      "translation_table": [
        {
          "field_a_term": "entanglement entropy S(A) = -Tr(ρ_A log ρ_A) (quantum physics)",
          "field_b_term": "bond dimension χ of the tensor network (mathematics)",
          "note": "S(A) ≤ log(χ) for MPS; area-law states have polynomial χ while volume-law states require exponential χ"
        },
        {
          "field_a_term": "area law for gapped ground states (quantum physics)",
          "field_b_term": "matrix product state representation with O(1) bond dimension (mathematics)",
          "note": "Hastings theorem proves area law for 1D gapped Hamiltonians, guaranteeing efficient MPS representation"
        },
        {
          "field_a_term": "quantum criticality / conformal field theory (quantum physics)",
          "field_b_term": "MERA (Multi-scale Entanglement Renormalization Ansatz) hierarchical tensor network (mathematics)",
          "note": "MERA maps the RG flow of a CFT onto a holographic network where each layer represents a coarse-graining step"
        },
        {
          "field_a_term": "topological order and anyon excitations (quantum physics)",
          "field_b_term": "PEPS with non-trivial virtual symmetry transformations (mathematics)",
          "note": "Topological ground states are characterized by the virtual symmetry group of the tensor network"
        }
      ],
      "references": [
        {
          "doi": "10.1103/RevModPhys.77.259",
          "note": "Vidal (2007) - MERA and entanglement renormalization connecting quantum criticality to tensor networks"
        },
        {
          "doi": "10.1103/PhysRevLett.91.147902",
          "note": "Vidal (2003) - efficient simulation of 1D quantum systems using MPS / DMRG"
        },
        {
          "doi": "10.1103/PhysRevB.73.085115",
          "note": "Verstraete & Cirac (2006) - PEPS for 2D quantum systems and area law"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/quantum-physics-mathematics/b-entanglement-tensor-network-states.yaml"
    },
    {
      "id": "b-representation-theory-particles",
      "title": "The classification of all elementary particles follows from the representation theory of the Poincaré group (Wigner 1939) — particle spin is the label of the irreducible representation of SU(2), the Standard Model gauge group SU(3)×SU(2)×U(1) determines all allowed interactions via group representations, and every conserved quantum number corresponds to a generator of a symmetry Lie group.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Wigner (1939) proved that every quantum mechanical particle corresponds to an irreducible unitary representation of the Poincaré group (the symmetry group of special relativity: translations + Lorentz transformations). This is one of the deepest results in mathematical physics: the classification of",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-lie-group-beyond-standard-model"
      ],
      "communication_gap": "Wigner (1939) published his classification in Annals of Mathematics — pure mathematics, not physics. The physical interpretation was understood by quantum field theorists immediately but is not taught at the undergraduate level in physics programs. Most physics students encounter particles phenomenologically (the particle zoo) without learning that their properties are algebraically derived from group theory. Conversely, mathematics students learning representation theory of Lie groups are not told that their results classify all possible elementary particles. The Standard Model Lagrangian, which packs the entire representation structure into one equation, is rarely decoded into its group-theoretic components in either physics or mathematics courses.\n",
      "translation_table": [
        {
          "field_a_term": "Irreducible unitary representation of Poincaré group",
          "field_b_term": "Elementary particle (with definite mass and spin)",
          "note": "Wigner's theorem: all stable particles are labelled by (m, s) = Casimir invariants of Poincaré group"
        },
        {
          "field_a_term": "Spin s (label of SU(2) representation)",
          "field_b_term": "Particle spin quantum number (0: scalar, 1/2: fermion, 1: vector, 2: tensor)",
          "note": "s labels the dimension 2s+1 representation of SU(2): electron (s=1/2), photon (s=1, helicity ±1)"
        },
        {
          "field_a_term": "Adjoint representation of gauge group G",
          "field_b_term": "Gauge bosons (carriers of fundamental forces)",
          "note": "Photon: adjoint of U(1); W, Z: adjoint of SU(2)×U(1); gluons: adjoint of SU(3) (dimension 8)"
        },
        {
          "field_a_term": "Generator T_a of Lie algebra g",
          "field_b_term": "Conserved charge (Noether current)",
          "note": "Electric charge = U(1) generator; isospin = SU(2) generators; colour = SU(3) generators"
        },
        {
          "field_a_term": "Representation of matter field under G",
          "field_b_term": "Interaction content of the particle",
          "note": "Quarks: (3, 2, 1/3) of SU(3)×SU(2)×U(1); electrons: (1, 2, -1); top quark: (3, 1, 4/3)"
        },
        {
          "field_a_term": "Group embedding G ⊂ H (GUT)",
          "field_b_term": "Unification of fundamental forces at high energy",
          "note": "SU(5) embeds SU(3)×SU(2)×U(1); representations unify quarks and leptons, predict proton decay"
        },
        {
          "field_a_term": "Character of representation χ(g) = Tr(ρ(g))",
          "field_b_term": "Partition function / path integral over field configurations",
          "note": "Characters encode the full structure of a representation; path integrals sum over all group representations"
        }
      ],
      "references": [
        {
          "doi": "10.2307/1968551",
          "note": "Wigner (1939) Ann Math 40:149 — on unitary representations of the inhomogeneous Lorentz group; particle classification theorem"
        },
        {
          "doi": "10.1007/BF01450497",
          "note": "Weyl (1929) Z Phys 56:330 — electron and gravitation; gauge principle and representations"
        },
        {
          "note": "Georgi (1999) Lie Algebras in Particle Physics, 2nd ed. (Westview Press) — standard particle physics group theory text",
          "url": "https://www.taylorfrancis.com/books/mono/10.1201/9780429499210"
        },
        {
          "note": "Zee (2010) Quantum Field Theory in a Nutshell, 2nd ed. (Princeton UP) — Wigner classification and gauge theory",
          "url": "https://press.princeton.edu/books/hardcover/9780691140346/quantum-field-theory-in-a-nutshell"
        },
        {
          "doi": "10.1016/0370-2693(74)90058-6",
          "note": "Gell-Mann & Ne'eman — Eightfold Way; SU(3) representation theory predicts hadron spectrum"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/quantum-physics-mathematics/b-representation-theory-particles.yaml"
    },
    {
      "id": "b-berry-phase-x-polarization-parallel-transport-optics",
      "title": "Berry phase in quantum systems and Pancharatnam-Berry phase in polarization optics share a geometric parallel-transport structure: cyclic parameter changes accumulate phase from path geometry rather than local dynamical time alone.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The common object is holonomy on a parameter space. Polarization optics offers visible interferometric demonstrations of geometric phase, while quantum mechanics supplies the broader adiabatic-phase language.\n",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-pancharatnam-loop-area-predicts-interferometric-phase-shifts"
      ],
      "communication_gap": "Quantum curricula emphasize adiabatic Hamiltonians, while optics labs often teach wave plates and polarization spheres without naming the same differential-geometric structure.\n",
      "translation_table": [
        {
          "field_a_term": "quantum state transported around parameter loop",
          "field_b_term": "polarization state transported on Poincare sphere",
          "note": "Both trace a loop in state space."
        },
        {
          "field_a_term": "Berry phase",
          "field_b_term": "Pancharatnam-Berry geometric phase",
          "note": "Phase is geometric rather than purely dynamical."
        },
        {
          "field_a_term": "holonomy",
          "field_b_term": "interferometric phase shift from closed polarization path",
          "note": "The differential-geometric structure is shared."
        }
      ],
      "references": [
        {
          "doi": "10.1098/rspa.1984.0023",
          "note": "Berry (1984) quantum phase factors accompanying adiabatic changes."
        },
        {
          "doi": "10.1098/rspa.1956.0036",
          "note": "Pancharatnam (1956) phase relations in polarized beams."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/quantum-physics-optics/b-berry-phase-x-polarization-parallel-transport-optics.yaml"
    },
    {
      "id": "b-photon-antibunching-sub-poissonian",
      "title": "Photon antibunching is the quantum optical signature of sub-Poissonian statistics: the second-order coherence g⁽²⁾(0) < 1 certifies non-classical single-photon emission",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The normalized second-order intensity correlation function g⁽²⁾(τ)= ⟨:I(t)I(t+τ):⟩/⟨I⟩² characterizes photon statistics. For coherent (classical) light g⁽²⁾(0)=1; for thermal light g⁽²⁾(0)=2; for a quantum single-photon emitter g⁽²⁾(0)=0 (antibunching). Antibunching g⁽²⁾(0)<1 is impossible in classi",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-photon-antibunching-sub-poissonian"
      ],
      "communication_gap": "Quantum optics formalism (density matrices, normally-ordered correlation functions, Glauber coherence theory) is not standard training for optical engineers or photonics researchers who design classical coherent systems. The connection between g⁽²⁾(0) and the Mandel Q parameter is well known within quantum optics but the statistical physics interpretation (sub-Poissonian as negative Fano factor) is rarely taught in statistics or signal processing curricula.\n",
      "translation_table": [
        {
          "field_a_term": "photon number state |n⟩ (Fock state) in quantum optics",
          "field_b_term": "sub-Poissonian counting distribution in classical statistics",
          "note": "Fock state |1⟩ has zero variance in photon number — perfect sub-Poissonian"
        },
        {
          "field_a_term": "g⁽²⁾(0) second-order coherence at zero delay",
          "field_b_term": "normalized factorial moment ⟨n(n-1)⟩/⟨n⟩² of photon counts",
          "note": "g⁽²⁾(0) < 1 is the operational definition of non-classical light"
        },
        {
          "field_a_term": "Hanbury Brown-Twiss experiment (beamsplitter + two detectors)",
          "field_b_term": "coincidence counting experiment measuring joint photon arrival statistics",
          "note": "HBT setup directly measures the time-delay histogram that gives g⁽²⁾(τ)"
        },
        {
          "field_a_term": "quantum dot exciton lifetime (radiative decay)",
          "field_b_term": "Poisson process rate parameter for photon emission events",
          "note": "Lifetime determines emission bandwidth; antibunching dip width ~ 2γ radiative rate"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.39.691",
          "note": "Kimble, Dagenais & Mandel (1977) Photon antibunching in resonance fluorescence. PRL 39:691 — first experimental observation"
        },
        {
          "doi": "10.1103/PhysRevA.6.2211",
          "note": "Glauber (1963) Coherent and incoherent states of radiation field. Phys Rev 131:2766"
        },
        {
          "doi": "10.1038/nature01086",
          "note": "Santori et al. (2002) Indistinguishable photons from a single-photon device. Nature 419:594"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/quantum-physics-optics/b-photon-antibunching-sub-poissonian.yaml"
    },
    {
      "id": "b-quantum-dot-blinking-renewal-process",
      "title": "Quantum dot fluorescence intermittency (blinking) obeys power-law on-time and off-time distributions that follow a renewal process with Levy-stable statistics, connecting single-particle quantum physics to renewal theory and anomalous diffusion through the universal power-law trap model.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Individual CdSe quantum dots exhibit binary fluorescence switching between bright (on) and dark (off) states. Empirically, P(t_on) ~ t^{-alpha} and P(t_off) ~ t^{-beta} with alpha, beta in (1, 2), meaning mean on/off times diverge - a hallmark of Levy-stable statistics and anomalous renewal processe",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Quantum dot experimentalists fit power laws to blinking data without engaging with the mathematical machinery of renewal theory and Levy processes; statisticians studying anomalous renewal processes have not systematically used QD blinking as a model system to test analytical predictions.\n",
      "translation_table": [
        {
          "field_a_term": "Levy-stable waiting time distribution (statistics)",
          "field_b_term": "power-law on/off time distribution in QD blinking (quantum physics)",
          "note": "P(t) ~ t^{-alpha} with 1 < alpha < 2; mean waiting time diverges"
        },
        {
          "field_a_term": "renewal process (statistics)",
          "field_b_term": "sequence of on/off switching events in a single QD (quantum physics)",
          "note": "Each blinking event is an inter-renewal interval; process lacks characteristic timescale"
        },
        {
          "field_a_term": "ergodicity breaking (statistics)",
          "field_b_term": "non-ergodic fluorescence time traces (quantum physics)",
          "note": "Time-average photon count converges to ensemble average only for measurement times >> mean trap time"
        },
        {
          "field_a_term": "anomalous diffusion exponent (statistics)",
          "field_b_term": "power-law exponent of trap depth distribution (quantum physics)",
          "note": "Exponential distribution of trap barriers E_b ~ Uniform generates power-law waiting times"
        }
      ],
      "references": [
        {
          "doi": "10.1038/376144a0",
          "note": "Nirmal et al. (1996) - fluorescence intermittency in single cadmium selenide nanocrystals"
        },
        {
          "doi": "10.1038/35098009",
          "note": "Kuno et al. (2001) - nonexponential blinking kinetics of single CdSe quantum dots"
        },
        {
          "doi": "10.1103/PhysRevLett.94.240602",
          "note": "Bel & Barkai (2005) - weak ergodicity breaking in the continuous time random walk"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/quantum-physics-statistics/b-quantum-dot-blinking-renewal-process.yaml"
    },
    {
      "id": "b-resnet-x-histopathology-domain-shift-robustness",
      "title": "Residual learning bridges deep optimization stability and histopathology robustness under stain and scanner domain shift.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy (to be empirically validated): residual blocks that stabilize very deep optimization can also stabilize representation transfer under histopathology stain variability when coupled with domain-aware normalization.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-residual-feature-normalization-reduces-histology-site-shift-error"
      ],
      "communication_gap": "Deep-vision benchmarks often ignore acquisition pipeline variability that dominates pathology deployment error.",
      "translation_table": [
        {
          "field_a_term": "identity skip pathway",
          "field_b_term": "feature-preserving transfer under shift",
          "note": "Identity mappings reduce degradation from deeper transformations."
        },
        {
          "field_a_term": "residual refinement",
          "field_b_term": "stain-invariant feature correction",
          "note": "Residual updates can encode nuisance correction signals."
        },
        {
          "field_a_term": "deep feature hierarchy",
          "field_b_term": "multi-scale tissue morphology descriptors",
          "note": "Hierarchies capture nuclei-to-architecture patterns."
        }
      ],
      "references": [
        {
          "arxiv": "1512.03385",
          "note": "Deep Residual Learning for Image Recognition."
        },
        {
          "url": "https://www.cancer.gov/research/infrastructure/cancer-centers/pathology",
          "note": "Pathology workflow context for translational deployment."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/radiology-machine-learning/b-resnet-x-histopathology-domain-shift-robustness.yaml"
    },
    {
      "id": "b-physics-informed-neural-operator-x-aftershock-field-evolution",
      "title": "Physics-informed neural operators bridge PDE-constrained learning and spatiotemporal aftershock field evolution modeling.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy (to be empirically validated): Physics-informed neural-operator constraints can regularize aftershock field forecasts analogously to stress-transfer priors in statistical seismology models.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-pino-aftershock-fields-improve-short-term-seismic-hazard-maps"
      ],
      "communication_gap": "Domain operators prioritize interpretable reliability diagnostics, while ML work often prioritizes aggregate accuracy without deployment-grade uncertainty audits.",
      "translation_table": [
        {
          "field_a_term": "model prior",
          "field_b_term": "domain prior",
          "note": "Both constrain inference in data-sparse regimes."
        },
        {
          "field_a_term": "uncertainty estimate",
          "field_b_term": "risk-aware decision support",
          "note": "Uncertainty quality determines practical utility."
        },
        {
          "field_a_term": "out-of-distribution behavior",
          "field_b_term": "deployment robustness",
          "note": "Shift sensitivity governs real-world reliability."
        }
      ],
      "references": [
        {
          "arxiv": "2111.03794",
          "note": "Physics-informed neural operator methodology."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/seismology-machine-learning/b-physics-informed-neural-operator-x-aftershock-field-evolution.yaml"
    },
    {
      "id": "b-hawkes-self-excitation-x-seizure-aftershock-clustering",
      "title": "Hawkes self-exciting point processes unify earthquake aftershock clustering and seizure-burst event cascades.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Aftershocks and seizure bursts both show event-triggered increases in short-term event intensity. Hawkes branching structure provides a common language for estimating endogenous cascade risk versus exogenous driving.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-hawkes-branching-threshold-predicts-seizure-clusters"
      ],
      "communication_gap": "Adjacent communities use different software stacks and validation norms, so mathematically equivalent tools are often rediscovered in parallel.",
      "translation_table": [
        {
          "field_a_term": "Omori-Utsu aftershock decay",
          "field_b_term": "Post-ictal event tail",
          "note": "Both can be represented via decaying triggering kernels."
        },
        {
          "field_a_term": "Branching ratio eta",
          "field_b_term": "Seizure cascade gain",
          "note": "eta near 1 marks fragile near-critical dynamics."
        },
        {
          "field_a_term": "Background rate mu",
          "field_b_term": "Baseline excitability",
          "note": "Separates spontaneous from triggered events."
        }
      ],
      "references": [
        {
          "doi": "10.1088/1742-5468/2011/12/P12028",
          "note": "Bacry et al. Hawkes-process estimation and applications."
        },
        {
          "doi": "10.1007/s00780-015-0282-y",
          "note": "Hardiman et al. critical reflexivity / branching-ratio analysis."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/seismology-neuroscience/b-hawkes-self-excitation-x-seizure-aftershock-clustering.yaml"
    },
    {
      "id": "b-earthquake-source-dislocation-theory",
      "title": "Earthquake source mechanics is formally equivalent to dislocation theory in solid mechanics: seismic moment tensors describe the equivalent force system of a shear crack (dislocation) on a fault plane, and radiated seismic wavefields are computed as the elastic Green's function response to dislocation propagation\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "An earthquake rupture is a propagating shear dislocation on a fault surface: the moment tensor M_ij = μ A d (n_i d_j + n_j d_i) (μ = shear modulus, A = rupture area, d = slip, n = fault normal, d = slip direction) is mathematically identical to the elastic dipole tensor of a dislocation loop in soli",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-dislocation-nucleation-length-predicts-mainshock-magnitude"
      ],
      "communication_gap": "Seismologists analyze waveforms and infer fault parameters while solid mechanicists study crystal dislocation dynamics in materials; although the mathematical equivalence was established by Burridge and Knopoff (1964) and Aki (1966), the dislocation theory formalism is rarely used outside earthquake source theory and is not part of standard solid mechanics education.\n",
      "translation_table": [
        {
          "field_a_term": "seismic moment tensor M_ij (seismology)",
          "field_b_term": "elastic dipole tensor of dislocation loop in a medium (solid mechanics)",
          "note": "M_ij = μ[d_i n_j + d_j n_i] A exactly matches the dipole tensor of a Volterra dislocation with Burgers vector b = d·A"
        },
        {
          "field_a_term": "fault slip distribution u(x,t) (seismology)",
          "field_b_term": "dislocation density distribution and propagation velocity on a glide plane (solid mechanics)",
          "note": "Heterogeneous fault slip maps to spatially variable Burgers vector density; slip rate = dislocation velocity"
        },
        {
          "field_a_term": "seismic wave radiation (seismology)",
          "field_b_term": "elastic wave emission from accelerating dislocation line (solid mechanics)",
          "note": "Far-field P and S wave amplitudes from fault rupture are identical to radiation from moving dislocations (Bremsstrahlung analogy)"
        },
        {
          "field_a_term": "stress drop Δσ on fault (seismology)",
          "field_b_term": "elastic stress release during dislocation glide (solid mechanics)",
          "note": "Stress drop is the reduction in shear stress across the fault plane after rupture, equivalent to unloading stress at a dislocation glide plane"
        }
      ],
      "references": [
        {
          "doi": "10.1785/BSSA0560010125",
          "note": "Aki (1966) - generation and propagation of G waves from the Niigata earthquake"
        },
        {
          "doi": "10.1785/BSSA0540061875",
          "note": "Burridge & Knopoff (1964) - body force equivalents for seismic dislocations"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/seismology-physics/b-earthquake-source-dislocation-theory.yaml"
    },
    {
      "id": "b-seismology-percolation",
      "title": "Earthquake fault networks exhibit Gutenberg-Richter power-law magnitude-frequency distributions because fault systems self-organize to the percolation critical point, making seismic hazard a direct application of percolation criticality theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The Gutenberg-Richter law (log N = a - b*M, where N is the number of earthquakes exceeding magnitude M and b ≈ 1 universally) is the earthquake community's empirical observation that seismic energy release follows a power law spanning many decades. This is not a property of rock physics alone—it eme",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Seismologists develop Gutenberg-Richter statistics empirically and use them for probabilistic seismic hazard analysis without requiring a physical derivation. Statistical physicists studying percolation rarely have access to seismic catalog data and do not typically investigate geophysical applications. The mathematical connection has been noted in the theoretical seismology literature (Sornette, Main, Turcotte) but has not been adopted into standard seismic hazard practice, partly because the fault connectivity data needed to test the percolation hypothesis at regional scale are not routinely compiled.\n",
      "translation_table": [
        {
          "field_a_term": "Fault network (seismology)",
          "field_b_term": "Random bond lattice (percolation theory)"
        },
        {
          "field_a_term": "Earthquake magnitude-frequency (GR law, b-value)",
          "field_b_term": "Percolation cluster size distribution (power-law exponent τ)"
        },
        {
          "field_a_term": "b = 1 universal GR exponent",
          "field_b_term": "τ ≈ 2.05 percolation critical exponent (via moment-area scaling)"
        },
        {
          "field_a_term": "High-stress fault zone (b < 1)",
          "field_b_term": "Super-critical percolation (spanning cluster exists, large events dominate)"
        },
        {
          "field_a_term": "Immature, disconnected fault system (b > 1)",
          "field_b_term": "Sub-critical percolation (only small clusters, no spanning cluster)"
        },
        {
          "field_a_term": "Foreshock b-value decrease before mainshock",
          "field_b_term": "Correlation length divergence approaching percolation critical point"
        }
      ],
      "references": [
        {
          "doi": "10.1029/95JB01091",
          "note": "Sornette & Sammis (1995) - Complex critical exponents from renormalization group theory of earthquakes; percolation connection"
        },
        {
          "doi": "10.1029/92JB01381",
          "note": "Main (1992) - Damage mechanics model for power-law statistics in seismicity; percolation-based derivation of b-value"
        },
        {
          "doi": "10.1007/BF00874612",
          "note": "Turcotte (1992) - Fractals and chaos in geology and geophysics; GR law as fractal percolation"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/seismology-physics/b-seismology-percolation.yaml"
    },
    {
      "id": "b-seismograph-matched-filter-cross-correlation",
      "title": "Seismic signal detection uses matched filtering and cross-correlation from signal processing theory: a template waveform from a known event is cross-correlated with continuous seismic recordings to detect repeating earthquakes at signal-to-noise ratios far below the detection threshold of traditional STA/LTA methods.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The matched filter is the optimal linear filter for detecting a known signal s(t) in white Gaussian noise: h(t) = s(T-t) (time-reversed template). The output cross-correlation C(τ) = ∫s(t)·x(t+τ)dt achieves the maximum SNR = 2E/N₀ at detection time (E = signal energy, N₀ = noise power spectral densi",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-deep-learning-seismic-detection-vs-matched-filter-completeness"
      ],
      "communication_gap": "Seismologists and signal processing engineers share cross-correlation mathematics but developed parallel terminologies and communities; modern earthquake seismology increasingly recruits signal processing expertise, but template-matching seismology as a formal matched-filter application was only systematically developed from the 2000s onward, leaving gaps in applying modern signal detection theory to seismic monitoring.\n",
      "translation_table": [
        {
          "field_a_term": "template event waveform (seismology)",
          "field_b_term": "matched filter template s(t) (signal processing)",
          "note": "Known reference earthquake waveform acts as the optimal detection filter"
        },
        {
          "field_a_term": "seismic catalog detection threshold (seismology)",
          "field_b_term": "minimum detectable signal level / SNR threshold (signal processing)",
          "note": "Matched filtering lowers the detection threshold by ~1.5-2 magnitude units"
        },
        {
          "field_a_term": "cross-correlation coefficient CC (seismology)",
          "field_b_term": "normalized cross-correlation output (signal processing)",
          "note": "CC ∈ [-1,1] measures waveform similarity; high CC identifies repeating earthquakes"
        },
        {
          "field_a_term": "differential travel time δt (seismology)",
          "field_b_term": "time delay estimation (signal processing)",
          "note": "Sub-sample timing from cross-correlation peak enables high-precision hypocenter relocation"
        }
      ],
      "references": [
        {
          "doi": "10.1785/0120080010",
          "note": "Gibbons & Ringdal (2006) - detection of low-magnitude seismic events using matched filters"
        },
        {
          "doi": "10.1126/science.1228330",
          "note": "Shelly et al. (2007) - non-volcanic tremor and low-frequency earthquakes detected by template matching"
        },
        {
          "doi": "10.1785/0220120049",
          "note": "Chamberlain et al. (2014) - EQcorrscan open-source matched filter seismology"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/seismology-signal-processing/b-seismograph-matched-filter-cross-correlation.yaml"
    },
    {
      "id": "b-earthquake-aftershocks-omori-utsu-etas",
      "title": "Earthquake aftershock sequences obey the Omori-Utsu power law and are modeled by the ETAS (Epidemic Type Aftershock Sequence) point process — a self-exciting Hawkes process that maps seismicity onto the statistical physics of critical branching processes and second-order phase transitions.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The rate of aftershocks decays as r(t) ∝ (t+c)^(-p) (Omori-Utsu law, p≈1), and the ETAS model extends this to a branching process where each earthquake triggers offspring at rate K·10^(α·M). Near the critical branching ratio n=1, ETAS exhibits power-law cluster-size distributions (Gutenberg-Richter)",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-etas-branching-ratio-seismic-hazard-forecast"
      ],
      "communication_gap": "Seismologists and statistical physicists working on self-organized criticality have collaborated since Bak et al. (1987), but the SOC paradigm is contested in seismology; most operational seismic hazard assessment uses empirical ETAS without invoking the critical-point analogy.\n",
      "translation_table": [
        {
          "field_a_term": "aftershock rate decay r(t) (seismology)",
          "field_b_term": "memory kernel of Hawkes process (statistical physics)",
          "note": "Omori power-law decay is the kernel function of the self-exciting point process"
        },
        {
          "field_a_term": "Gutenberg-Richter b-value / magnitude distribution (seismology)",
          "field_b_term": "power-law event-size distribution at criticality (statistical physics)",
          "note": "G-R law implies scale-free magnitude distribution consistent with SOC critical state"
        },
        {
          "field_a_term": "branching ratio n (mean triggered offspring per earthquake) (seismology)",
          "field_b_term": "branching ratio of critical Galton-Watson process (statistical physics)",
          "note": "n<1 subcritical (finite aftershock sequence); n→1 critical (power-law clusters)"
        },
        {
          "field_a_term": "mainshock-aftershock productivity (seismology)",
          "field_b_term": "supercriticality parameter above phase transition (statistical physics)",
          "note": "Large mainshocks push the local branching ratio above 1 transiently"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF01964975",
          "note": "Ogata (1988) — statistical models for earthquake occurrences (ETAS formulation)"
        },
        {
          "doi": "10.1029/2000JB000163",
          "note": "Helmstetter & Sornette (2002) — subcritical and supercritical branching in ETAS"
        },
        {
          "doi": "10.1103/PhysRevLett.59.381",
          "note": "Bak, Tang & Wiesenfeld (1987) — self-organized criticality and 1/f noise"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/seismology-statistical-physics/b-earthquake-aftershocks-omori-utsu-etas.yaml"
    },
    {
      "id": "b-earthquake-alarm-decision-x-wald-sequential-probability-ratio-test",
      "title": "Earthquake early warning public alerting is not pure estimation: stakeholders face sequential decisions under latency — Wald’s sequential probability ratio test formalizes threshold policies balancing false alarms and misses, complementing recursive Bayesian magnitude tracking (seismology ↔ sequential hypothesis testing).\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "EEW systems trigger alerts when predicted shaking exceeds thresholds at sites with lead time > desired seconds. Wald’s SPRT analyzes sequential likelihood ratios until crossing boundaries A,B controlling type-I/type-II error rates with minimal expected sample size under i.i.d. assumptions (often vio",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-aftershock-clustering-inflates-sprt-false-alarm-rate-fixed-boundaries"
      ],
      "communication_gap": "Operations centers speak thresholds and polygon alerts; mathematical statistics curricula teach Wald stopping—cross-training between agencies and academic statistics remains uneven.\n",
      "translation_table": [
        {
          "field_a_term": "likelihood ratio Λ_n after each incoming waveform snippet",
          "field_b_term": "Wald statistic compared to stopping boundaries",
          "note": "Real EEW uses heuristic thresholds and ensemble voting; SPRT is idealized baseline."
        },
        {
          "field_a_term": "false alarm vs missed event societal costs",
          "field_b_term": "asymmetric boundary selection in sequential tests",
          "note": "Civil protection emphasizes misses differently than laboratory QC applications."
        },
        {
          "field_a_term": "correlated aftershocks / nonstationary noise",
          "field_b_term": "SPRT i.i.d. assumptions break — requires martingale or robust variants",
          "note": "Bridge_claim caveat — **direct SPRT optimality is contested** under correlated seismic data.\n"
        }
      ],
      "references": [
        {
          "doi": "10.1214/aoms/1177731118",
          "note": "Wald (1945) — sequential tests of statistical hypotheses (Ann. Math. Stat.)."
        },
        {
          "doi": "10.1146/annurev.earth.031208.100055",
          "note": "Allen et al. (2009) — EEW systems overview providing operational decision context (Annu. Rev. Earth Planet. Sci.)."
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/seismology-statistics/b-earthquake-alarm-decision-x-wald-sequential-probability-ratio-test.yaml"
    },
    {
      "id": "b-phase-retrieval-x-cryoem-orientation-inference",
      "title": "Phase-retrieval alternating-projection methods map onto cryo-EM orientation and reconstruction inference loops.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Phase-retrieval alternating-projection methods map onto cryo-EM orientation and reconstruction inference loops.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-alternating-projection-warm-starts-reduce-cryoem-orientation-assignment-errors"
      ],
      "communication_gap": "The two communities use different notation, benchmarks, and publication venues, which obscures transferable mathematical structure.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1109/TIT.1982.1056489",
          "note": "Fienup phase retrieval algorithms."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/signal-processing-structural-biology/b-phase-retrieval-x-cryoem-orientation-inference.yaml"
    },
    {
      "id": "b-social-ising-polarisation",
      "title": "Political polarisation dynamics in networked populations are mathematically equivalent to the Ising model ferromagnetic phase transition, with partisan identity as spin, echo chambers as ferromagnetic domains, and social influence strength as inverse temperature.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The Ising model describes how local alignment interactions between magnetic spins produce global ordered phases (ferromagnetism) or disordered phases (paramagnetism) depending on temperature. Political polarisation shows strikingly similar dynamics: individuals align their views with social neighbou",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-polarisation-ising-phase-transition"
      ],
      "communication_gap": "Political scientists rarely engage statistical physics literature; physicists studying opinion dynamics rarely engage the substantive political science literature on polarisation mechanisms. Sociophysics models of opinion dynamics exist but are not integrated with empirical political science data. The Ising-polarisation analogy is qualitatively known in sociophysics but has not been operationalised with real partisan identity survey data and tested against the renormalisation group predictions.\n",
      "translation_table": [
        {
          "field_a_term": "spin (up or down)",
          "field_b_term": "partisan identity (Democrat or Republican)",
          "note": "The binary state variable; extensions to Potts model can handle multi-party systems"
        },
        {
          "field_a_term": "exchange coupling J",
          "field_b_term": "social influence strength (peer pressure, media consumption homophily)",
          "note": "The tendency to align with neighbours; heterogeneous J maps to variable influence susceptibility"
        },
        {
          "field_a_term": "temperature T",
          "field_b_term": "inverse of social conformity pressure (openness to persuasion)",
          "note": "High T = disordered opinions; low T = locked-in partisan alignment"
        },
        {
          "field_a_term": "ferromagnetic phase transition at T_c",
          "field_b_term": "polarisation tipping point (sudden shift from pluralist to sorted society)",
          "note": "Below T_c, spontaneous magnetisation appears — below the political threshold, partisan sorting locks in"
        },
        {
          "field_a_term": "external magnetic field H",
          "field_b_term": "elite messaging / media framing (directional push on population opinion)",
          "note": "H breaks symmetry and can trigger or prevent magnetisation in one direction"
        },
        {
          "field_a_term": "domain walls",
          "field_b_term": "geographical and social boundaries between partisan communities",
          "note": "Domain walls are costly (frustration) in the Ising model; partisan boundaries involve costly cross-cutting ties"
        }
      ],
      "references": [],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/social-physics/b-social-ising-polarisation.yaml"
    },
    {
      "id": "b-behavioral-immunology-pathogen-avoidance",
      "title": "Schaller's behavioral immune system (BIS) — evolved disgust-based pathogen avoidance using false-positive-biased detection — predicts cross-national correlations between historical pathogen prevalence and collectivism, sexual conservatism, and xenophobia, mapping to Neyman-Pearson Type I/II error trade-offs in signal detection theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The biological immune system responds to pathogens after infection, with latency of days to weeks. The behavioral immune system (Schaller & Park 2011) is a suite of cognitive-motivational mechanisms that detect pathogen cues before infection and activate prophylactic behavioral avoidance — a faster ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-bis-disgust-threshold-pathogen-prevalence-calibration"
      ],
      "communication_gap": "Behavioral immune system research is published primarily in social psychology and evolutionary psychology journals (Psych Science, Evol Hum Behav, J Pers Soc Psych), while immunological research on the same system is in immunology journals (J Immunol, Immunity). Epidemiologists who study pathogen prevalence rarely engage with cross-national personality research. Political scientists studying anti-immigration attitudes are largely unaware of the BIS literature. The signal-detection theory framing (False Positive / False Negative optimal threshold) is known to psychologists but the explicit connection to Neyman-Pearson statistics requires simultaneous expertise in decision theory and evolutionary psychology.\n",
      "translation_table": [
        {
          "field_a_term": "immune system pathogen recognition receptor (PRR, e.g., TLR4)",
          "field_b_term": "disgust elicitor cue detection system (perceptual pathogen detector)"
        },
        {
          "field_a_term": "innate immune false-positive (mount immune response to non-pathogen)",
          "field_b_term": "BIS false positive (social avoidance of uninfected person with stigma cue)"
        },
        {
          "field_a_term": "pathogen avoidance (immunological tolerance vs. elimination)",
          "field_b_term": "behavioral avoidance vs. engagement (social exclusion vs. inclusion)"
        },
        {
          "field_a_term": "Neyman-Pearson detection threshold θ (H₀/H₁ boundary)",
          "field_b_term": "disgust sensitivity trait (individual variation in BIS activation threshold)"
        },
        {
          "field_a_term": "ROC curve (sensitivity vs. specificity trade-off)",
          "field_b_term": "BIS calibration trade-off: xenophobia cost vs. infection avoidance benefit"
        }
      ],
      "references": [
        {
          "doi": "10.1177/0963721411402669",
          "note": "Schaller & Park (2011) Curr Dir Psychol Sci 20:99 — behavioral immune system: its evolution and social psychological implications"
        },
        {
          "doi": "10.1098/rspb.2008.0094",
          "note": "Fincher et al. (2008) Proc R Soc B 275:1279 — pathogen prevalence predicts human cross-cultural variability in individualism/collectivism"
        },
        {
          "doi": "10.1037/0022-3514.76.4.574",
          "note": "Rozin et al. (1999) J Pers Soc Psychol 76:574 — the CAD triad hypothesis of disgust"
        },
        {
          "doi": "10.1111/j.1467-6494.2011.00730.x",
          "note": "Murray et al. (2011) J Pers 79:1211 — pathogens and politics: further evidence that parasite prevalence predicts authoritarianism"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-biology/b-behavioral-immunology-pathogen-avoidance.yaml"
    },
    {
      "id": "b-chronobiology-social-jet-lag",
      "title": "Social jet lag bridges chronobiology and social science: the mismatch between biological clock timing (TTFL circadian mechanism, CRY1/PER3 variants) and social schedule timing (school start times, work hours) creates measurable health and performance deficits across populations.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Social jet lag (Roenneberg 2012) quantifies the discrepancy between biological and social time as the difference in sleep midpoint (MSF = midsleep on free days) between work days and free days. Population chronotype distribution is approximately normal with peak MSF ≈ 3:30 AM; \"owls\" (late chronotyp",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-delayed-school-start-improves-adolescent-outcomes-causally"
      ],
      "communication_gap": "Chronobiologists who study circadian mechanisms in mouse models and human genetics rarely engage with education policy researchers who study school performance. The epidemiological evidence for social jet lag health effects (obesity, metabolic syndrome, depression, cardiovascular disease) is published in sleep medicine and public health journals; the mechanistic biology is in Cell and Nature. The AAP policy recommendation was driven by the epidemiology but rarely cites the TTFL molecular mechanisms.\n",
      "translation_table": [
        {
          "field_a_term": "social jet lag (MSF_work - MSF_free)",
          "field_b_term": "circadian misalignment (social schedule ≠ intrinsic period phase)",
          "note": "quantifies the gap between social demands and biological circadian phase"
        },
        {
          "field_a_term": "chronotype distribution (normal, peak MSF 3:30 AM)",
          "field_b_term": "population distribution of circadian period and phase-of-entrainment",
          "note": "late chronotypes have longer intrinsic periods → delayed phase of entrainment"
        },
        {
          "field_a_term": "adolescent phase delay (~2h)",
          "field_b_term": "pubertal regulation of melatonin onset and clock gene expression",
          "note": "a biological developmental shift that conflicts with early school schedules"
        },
        {
          "field_a_term": "school start time policy",
          "field_b_term": "external zeitgeber strength and phase (social schedule as light-dark proxy)",
          "note": "institutional scheduling operates as a powerful environmental time cue"
        },
        {
          "field_a_term": "CRY1/PER3 polymorphisms (chronotype genetics)",
          "field_b_term": "TTFL clock component variants altering intrinsic period",
          "note": "population genetic variation creates the chronotype distribution"
        }
      ],
      "references": [
        {
          "doi": "10.1016/j.cub.2012.03.038",
          "note": "Roenneberg et al. (2012) Social jetlag and obesity; Curr Biol 22:939"
        },
        {
          "doi": "10.3109/07420520600030503",
          "note": "Wittmann et al. (2006) Social jetlag — misalignment of biological and social time; Chronobiol Int 23:497"
        },
        {
          "note": "Carskadon et al. (1998) Pubertal changes in daytime sleepiness; Sleep 21:1"
        },
        {
          "doi": "10.1542/peds.2014-1697",
          "note": "AAP (2014) School start times for adolescents; Pediatrics 134:642"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/social-science-biology/b-chronobiology-social-jet-lag.yaml"
    },
    {
      "id": "b-cultural-evolution-darwinian",
      "title": "Cultural evolution is formally isomorphic to biological evolution — memes are replicators subject to transmission, variation, and selection; the Price equation governs both gene frequency change and cultural trait change; and replicator dynamics describe both biological fitness and cultural payoff — making evolutionary theory a universal framework for any inherited-variation-selection system.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Cavalli-Sforza & Feldman (1981) and Boyd & Richerson (1985) independently formalised cultural evolution as a Darwinian process with explicit analogies to population genetics. The formal structure is:\n1. Price equation governs both. Price (1970, Nature 227:520) derived the\n   fundamental equation of ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-price-equation-cultural-trait-frequency"
      ],
      "communication_gap": "Evolutionary biologists and cultural anthropologists have historically been suspicious of each other: biologists fear \"just-so stories\" about culture; anthropologists fear biological determinism and reductionism. The formal (mathematical) evolutionary approach of Cavalli-Sforza, Boyd & Richerson, and Henrich has been productive but remains a minority approach in cultural anthropology. Sociologists largely ignore evolutionary formalisms. The Price equation is standard in biology but unknown in most social science programmes.\n",
      "translation_table": [
        {
          "field_a_term": "gene (biology)",
          "field_b_term": "meme / cultural trait (social science)"
        },
        {
          "field_a_term": "DNA replication fidelity",
          "field_b_term": "cultural transmission fidelity (imitation accuracy)"
        },
        {
          "field_a_term": "natural selection (differential reproduction)",
          "field_b_term": "cultural selection (differential adoption / prestige bias)"
        },
        {
          "field_a_term": "genetic drift (random allele frequency change in small populations)",
          "field_b_term": "cultural drift (random trait frequency change in small groups)"
        },
        {
          "field_a_term": "Price equation (Cov(w,z))",
          "field_b_term": "cultural Price equation (Cov(adoption rate, trait value))"
        },
        {
          "field_a_term": "horizontal gene transfer (bacteria)",
          "field_b_term": "horizontal cultural transmission (peer learning)"
        },
        {
          "field_a_term": "Eigen error threshold (molecular evolution)",
          "field_b_term": "critical group size for cumulative cultural evolution (Henrich)"
        },
        {
          "field_a_term": "replicator dynamics (evolutionary game theory)",
          "field_b_term": "cultural variant frequency dynamics (social learning models)"
        }
      ],
      "references": [
        {
          "url": "https://press.princeton.edu/books/paperback/9780691082837/cultural-transmission-and-evolution",
          "note": "Cavalli-Sforza & Feldman (1981) Cultural Transmission and Evolution — Princeton University Press"
        },
        {
          "url": "https://press.uchicago.edu/ucp/books/book/chicago/C/bo5970597.html",
          "note": "Boyd & Richerson (1985) Culture and the Evolutionary Process — University of Chicago Press"
        },
        {
          "doi": "10.1038/227520a0",
          "note": "Price (1970) Selection and covariance — Price equation, Nature 227:520"
        },
        {
          "url": "https://www.selfish-gene.co.uk",
          "note": "Dawkins (1976) The Selfish Gene — Oxford University Press, meme concept"
        },
        {
          "doi": "10.1016/j.jtbi.2003.08.001",
          "note": "Henrich (2004) Demography and cultural evolution, J Theor Biol 228:197"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/social-science-biology/b-cultural-evolution-darwinian.yaml"
    },
    {
      "id": "b-cultural-evolution-dual-inheritance",
      "title": "Boyd and Richerson's dual inheritance theory (1985) formalizes the coevolution of genes and culture using population genetics mathematics — cultural allele frequencies evolve under selection, drift, and transmission biases including conformity and prestige, with the Price equation applying equally to both genetic and cultural change.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Dual inheritance theory (Boyd & Richerson 1985) treats culture as an inheritance system parallel to genetics. Cultural variants spread via selection (differential retention), unbiased transmission (random drift), and biased transmission. Prestige bias: copy high-status individuals. Conformity bias (",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-dual-inheritance-lactase-selection"
      ],
      "communication_gap": "Cultural evolution and genetic evolution are studied by different departments (anthropology/social science vs biology/genetics) with different methods, journals, and norms. The mathematical equivalence is accepted in evolutionary anthropology but rarely reaches sociology, cultural psychology, or mainstream economics. Resistance stems partly from the perceived reduction of culture to biology, even though dual inheritance theory explicitly treats culture as an autonomous inheritance system.\n",
      "translation_table": [
        {
          "field_a_term": "cultural variant (belief, practice, norm)",
          "field_b_term": "allele at a locus in population genetics",
          "note": "both are heritable, variable, and subject to selection and drift"
        },
        {
          "field_a_term": "imitation/social learning fidelity",
          "field_b_term": "replication fidelity / mutation rate",
          "note": "low imitation fidelity acts like high mutation rate, preventing cumulative adaptation"
        },
        {
          "field_a_term": "prestige bias (copy high-status individuals)",
          "field_b_term": "positive frequency-independent selection (success-based bias)",
          "note": "prestige bias can produce maladaptive runaway analogous to sexual selection"
        },
        {
          "field_a_term": "conformity bias (overweight majority)",
          "field_b_term": "positive frequency-dependent selection",
          "note": "conformity stabilizes cultural norms and enables coordinated social behavior"
        },
        {
          "field_a_term": "cultural group selection",
          "field_b_term": "multilevel selection (Price equation between-group component)",
          "note": "controversial but potentially explains unusual ultrasociality of humans"
        }
      ],
      "references": [
        {
          "note": "Boyd & Richerson (1985) Culture and the Evolutionary Process; University of Chicago Press"
        },
        {
          "note": "Cavalli-Sforza & Feldman (1981) Cultural Transmission and Evolution; Princeton University Press"
        },
        {
          "note": "Richerson & Boyd (2005) Not by Genes Alone; University of Chicago Press"
        },
        {
          "doi": "10.1002/evan.10017",
          "note": "Henrich & McElreath (2003) The evolution of cultural evolution. Evol Anthropol 12:123–135"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-biology/b-cultural-evolution-dual-inheritance.yaml"
    },
    {
      "id": "b-cultural-evolution-memetics",
      "title": "Cultural transmission exhibits the three conditions of Darwinian evolution — variation, heredity, and selection — making cultural change mathematically equivalent to population genetics and amenable to the same formal tools.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Dawkins' meme concept (1976) proposed that cultural units (ideas, practices, norms) replicate, vary, and are selected — formally parallel to genes. Henrich (2004) formalised cultural transmission using the Price equation: the change in mean cultural trait frequency equals selection + transmission bi",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-cultural-replicator-dynamics-rate"
      ],
      "communication_gap": "Cultural evolution is studied in anthropology, archaeology, and psychology — disciplines that rarely engage formal population genetics mathematics. Evolutionary biologists who know the Price equation rarely apply it to cultural datasets. Dual inheritance theory is known in evolutionary anthropology but has not penetrated mainstream social science.\n",
      "translation_table": [
        {
          "field_a_term": "gene (biological replicator)",
          "field_b_term": "meme / cultural variant (cultural replicator)",
          "note": "Both carry information; both are replicated with fidelity and variation"
        },
        {
          "field_a_term": "mutation rate",
          "field_b_term": "innovation / copying error rate in cultural transmission",
          "note": "Low fidelity transmission is equivalent to high mutation; degradation of culture"
        },
        {
          "field_a_term": "natural selection (differential reproduction)",
          "field_b_term": "prestige bias / success bias (differential imitation)",
          "note": "High-status or successful individuals are preferentially copied — cultural selection"
        },
        {
          "field_a_term": "genetic drift (random sampling)",
          "field_b_term": "unbiased copying error / cultural drift",
          "note": "In small populations, random copying dominates over selection (cf. neutral theory)"
        },
        {
          "field_a_term": "epistasis (gene interaction)",
          "field_b_term": "cultural co-adaptation (meme complexes)",
          "note": "Memes that fit together replicate more successfully as a package (religion + ethics)"
        }
      ],
      "references": [
        {
          "note": "Dawkins, R. (1976). The Selfish Gene. Oxford University Press."
        },
        {
          "note": "Boyd, R. & Richerson, P.J. (1985). Culture and the Evolutionary Process. University of Chicago Press."
        },
        {
          "doi": "10.1525/aa.2004.106.2.395",
          "note": "Henrich, J. (2004). Demography and cultural evolution. Am Antiq 69:197."
        },
        {
          "note": "Mesoudi, A. (2011). Cultural Evolution. University of Chicago Press."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-biology/b-cultural-evolution-memetics.yaml"
    },
    {
      "id": "b-moral-psychology-cooperation-game-theory",
      "title": "Moral intuitions of fairness (third-party punishment, inequity aversion) are quantitatively predicted by evolutionarily stable strategies in iterated public-goods games with altruistic punishment: the costly punishment instinct evolved to maintain cooperation in groups where purely self-interested free-riding would otherwise dominate.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Fehr & Gächter (2002) showed that humans will pay a personal cost to punish unfair players in one-shot public-goods games—a behaviour unexplained by standard self-interest models. Nowak & May (1992) and subsequent evolutionary simulations show that altruistic punishment is an ESS when group size and",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-punishment-threshold-ess-moral-universality"
      ],
      "communication_gap": "Moral psychologists (Jonathan Haidt's school) and evolutionary game theorists (Martin Nowak, Karl Sigmund) publish in different journals (Psychological Review, Journal of Personality and Social Psychology vs. Nature, PNAS) and rarely collaborate. The formal mapping from moral foundation universality to ESS robustness has not been derived.\n",
      "translation_table": [
        {
          "field_a_term": "Altruistic third-party punishment (paying to sanction defectors)",
          "field_b_term": "Costly punishment ESS in public-goods game",
          "note": "Both sacrifice payoff to maintain group cooperation; evolutionarily stable when c_punishment < (1-ε)Δcooperation"
        },
        {
          "field_a_term": "Inequity aversion (Fehr-Schmidt preference)",
          "field_b_term": "ESS fairness norm in ultimatum game",
          "note": "Ultimatum game rejection of unfair splits = enforcement of fairness ESS through credible threat"
        },
        {
          "field_a_term": "Moral disgust toward cheaters (Haidt moral foundation)",
          "field_b_term": "Emotional mechanism for maintaining conditional cooperation strategy",
          "note": "Disgust emotion reduces the cognitive load of defection detection; implementation of the TFT strategy"
        },
        {
          "field_a_term": "Cross-cultural universality of fairness intuitions",
          "field_b_term": "Cross-population robustness of cooperation ESS under diverse payoff structures",
          "note": "ESS that survives payoff variation = cultural universal; culture-specific norms = locally stable ESSs"
        }
      ],
      "references": [
        {
          "doi": "10.1038/415137a",
          "note": "Fehr & Gächter (2002) Nature – altruistic punishment in public goods games; costly punishment instinct"
        },
        {
          "doi": "10.1038/359826a0",
          "note": "Nowak & May (1992) Nature – evolutionary games and spatial chaos; cooperation on graphs"
        },
        {
          "doi": "10.1126/science.1137651",
          "note": "Haidt (2007) Science – the new synthesis in moral psychology; moral foundations theory"
        },
        {
          "doi": "10.1038/s41586-019-1099-4",
          "note": "Henrich et al. – prosociality and punishment across societies; cross-cultural game theory"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/social-science-biology/b-moral-psychology-cooperation-game-theory.yaml"
    },
    {
      "id": "b-social-learning-cultural-transmission",
      "title": "Social learning in human and animal populations follows the same population-genetic mathematics as cultural transmission: conformist bias maps to positive frequency dependence, prestige bias maps to fitness-dependent selection, and horizontal cultural transmission maps to gene flow, allowing the Price equation and selection gradient models to quantify cultural evolution",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Cavalli-Sforza and Feldman (1981) and Boyd and Richerson (1985) showed that cultural transmission obeys equations isomorphic to population genetics: a cultural variant's frequency Δp = p(1-p)[w_1 - w_2]/w̄ under biased transmission maps directly to the selection gradient equation, conformist bias cr",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Social scientists study learning mechanisms empirically in laboratory and field settings while evolutionary biologists model cultural evolution mathematically; the isomorphism between cultural and genetic transmission frameworks is known in dual-inheritance theory but rarely used by mainstream social scientists to quantify learning dynamics.",
      "translation_table": [
        {
          "field_a_term": "social learning (social science)",
          "field_b_term": "horizontal gene transfer / cultural inheritance channel (evolutionary biology)",
          "note": "Vertical (parent-offspring), oblique (older to younger), and horizontal (same-generation) transmission map to genealogical structures in genetics"
        },
        {
          "field_a_term": "conformist bias — copying most common behavior (social science)",
          "field_b_term": "positive frequency-dependent selection (evolutionary biology)",
          "note": "Conformism: p' = p + β*p*(1-p)*(p - 0.5) with β > 0, identical to frequency-dependent selection in population genetics"
        },
        {
          "field_a_term": "prestige-biased copying — copying high-status individuals (social science)",
          "field_b_term": "fitness-proportionate selection on cultural traits (evolutionary biology)",
          "note": "Prestige bias maps to differential cultural fitness; high-status individuals' variants increase in frequency analogously to high-fitness genotypes"
        },
        {
          "field_a_term": "cultural drift in small populations (social science)",
          "field_b_term": "genetic drift via Moran process (evolutionary biology)",
          "note": "Cultural variant frequency fluctuations in small groups follow the same 1/(2N_e) drift formula"
        }
      ],
      "references": [
        {
          "doi": "10.2307/1685347",
          "note": "Cavalli-Sforza & Feldman (1981) - cultural transmission and evolution: original mathematical framework"
        },
        {
          "doi": "10.1017/CBO9780511815676",
          "note": "Boyd & Richerson (1985) - Culture and the Evolutionary Process: dual-inheritance theory"
        },
        {
          "doi": "10.1016/j.tree.2012.06.009",
          "note": "Rendell et al. (2011) - cognitive underpinnings of social learning and cultural transmission"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/social-science-biology/b-social-learning-cultural-transmission.yaml"
    },
    {
      "id": "b-stress-biology-social-determinants",
      "title": "The biology of chronic stress bridges social science and biology: social determinants of health (employment, neighborhood, social status) are biologically embedded via the HPA axis, cortisol dysregulation, telomere shortening, and epigenetic modification — translating social inequality into measurable molecular and cellular damage.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Allostatic load (McEwen & Stellar 1993): chronic activation of stress-response systems (HPA axis, sympathetic nervous system, immune system) causes cumulative physiological wear that manifests as elevated cortisol, dysregulated circadian cortisol rhythms, elevated inflammatory markers (IL-6, CRP, TN",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-telomere-length-social-gradient-reversibility"
      ],
      "communication_gap": "Social determinants of health research and molecular biology operate in largely separate worlds. Epidemiologists (Marmot, Wilkinson, Link) document the social gradient using survey data and mortality statistics; molecular biologists (McEwen, Epel, McGowan) study stress biochemistry in rodents and cell cultures. Clinical medicine treats individuals; public health treats populations. The integration — measuring biological markers in population cohorts and connecting them to social exposures — requires epidemiological design, molecular biology methods, and social science theory simultaneously, a combination rarely found in single research teams or funded by single funding agencies.\n",
      "translation_table": [
        {
          "field_a_term": "socioeconomic status (SES — income, education, occupation)",
          "field_b_term": "allostatic load score (composite of 10+ biomarkers: cortisol, BP, waist-hip ratio, CRP)",
          "note": "SES is measured at population level; allostatic load is the biological fingerprint of SES on the body"
        },
        {
          "field_a_term": "job control / autonomy (social science construct)",
          "field_b_term": "HPA axis reactivity to controllable vs. uncontrollable stressors",
          "note": "controllability determines whether cortisol response is adaptive (acute) or damaging (chronic)"
        },
        {
          "field_a_term": "adverse childhood experience (ACE score, 10-item questionnaire)",
          "field_b_term": "NR3C1 and FKBP5 methylation (glucocorticoid receptor gene epigenetic programming)",
          "note": "ACE score predicts methylation status 20-30 years later; biological embedding of social history"
        },
        {
          "field_a_term": "social isolation (epidemiological risk factor — relative risk ~1.5 for mortality)",
          "field_b_term": "elevated inflammatory cytokines IL-6, TNF-α, CRP (molecular immune activation)",
          "note": "Cacioppo's work shows loneliness up-regulates pro-inflammatory gene expression within hours"
        },
        {
          "field_a_term": "neighborhood poverty level (census tract)",
          "field_b_term": "hair cortisol concentration (3-month integrated cortisol exposure)",
          "note": "hair cortisol provides biological measurement of chronic psychosocial stressor exposure"
        }
      ],
      "references": [
        {
          "doi": "10.1056/NEJM199801153380307",
          "note": "McEwen (1998) Protective and damaging effects of stress mediators; N Engl J Med 338:171"
        },
        {
          "doi": "10.1016/S0140-6736(78)90005-1",
          "note": "Marmot et al. (1978) Employment grade and coronary heart disease in British civil servants; Lancet 312:1003"
        },
        {
          "doi": "10.1016/S0749-3797(98)00017-8",
          "note": "Felitti et al. (1998) Relationship of childhood abuse and household dysfunction to many of the leading causes of death in adults; Am J Prev Med 14:245"
        },
        {
          "doi": "10.1073/pnas.0407162101",
          "note": "Epel et al. (2004) Accelerated telomere shortening in response to life stress; PNAS 101:17312"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-biology/b-stress-biology-social-determinants.yaml"
    },
    {
      "id": "b-drug-policy-pharmacoepidemiology",
      "title": "Pharmacoepidemiology bridges the molecular pharmacology of opioid receptor binding and the social epidemiology of the opioid crisis — harm reduction policies (naloxone distribution, methadone maintenance) derive their evidence base from both mu-receptor pharmacokinetics and population-level randomized trial data.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Pharmacoepidemiology studies drug effects at the population level, connecting molecular pharmacology to public health policy. The opioid epidemic illustrates this bridge at scale: prescription opioid deaths triggered heroin substitution which triggered illicit fentanyl (100x more potent than morphin",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-psilocybin-rescheduling-neuroplasticity-evidence"
      ],
      "communication_gap": "Drug policy is primarily debated in political science, sociology, law, and public health literatures. Molecular pharmacologists who understand receptor binding and metabolism rarely engage in policy advocacy. The DEA scheduling process does not systematically integrate pharmacological evidence — it is primarily a legal and political process. The psilocybin scheduling controversy has helped bridge this gap by forcing explicit comparison of pharmacological evidence with scheduling criteria.\n",
      "translation_table": [
        {
          "field_a_term": "mu-opioid receptor binding affinity (Ki)",
          "field_b_term": "overdose reversal efficacy and required naloxone dose",
          "note": "Higher-potency opioids (fentanyl Ki < 1 nM) may require higher or repeated naloxone doses"
        },
        {
          "field_a_term": "drug half-life (t½)",
          "field_b_term": "dosing interval in treatment protocols — methadone once daily vs. heroin multiple times daily",
          "note": "Pharmacokinetics directly determines adherence burden and diversion risk"
        },
        {
          "field_a_term": "5-HT2A receptor agonism (psilocybin mechanism)",
          "field_b_term": "antidepressant effect — schedule I classification vs. clinical evidence",
          "note": "Molecular mechanism supports rescheduling; regulatory barrier is policy not pharmacology"
        },
        {
          "field_a_term": "harm reduction continuum",
          "field_b_term": "public health policy ranking by risk-benefit ratio",
          "note": "Policy tiers map onto pharmacological risk profiles — each tier has molecular rationale"
        },
        {
          "field_a_term": "opioid epidemic cascade (Rx → heroin → fentanyl)",
          "field_b_term": "pharmacological substitution driven by market and regulatory incentives",
          "note": "Fentanyl proliferation is partly driven by its greater potency-to-weight ratio (smuggling efficiency)"
        }
      ],
      "references": [
        {
          "doi": "10.1097/01.jom.0000479702.28107.52",
          "note": "Kolodny et al. (2015) J Opioid Manag 11:5 — opioid epidemic drivers"
        },
        {
          "note": "Mattick et al. (2009) Cochrane Database — methadone maintenance effectiveness"
        },
        {
          "doi": "10.1056/NEJMoa2032994",
          "note": "Carhart-Harris et al. (2021) N Engl J Med 384:1402 — psilocybin vs. escitalopram"
        },
        {
          "doi": "10.1016/S0140-6736(10)61462-6",
          "note": "Nutt et al. (2010) Lancet 376:1558 — drug harms ranking"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-chemistry/b-drug-policy-pharmacoepidemiology.yaml"
    },
    {
      "id": "b-urban-ecology-ses",
      "title": "Urban ecosystems are novel socio-ecological assemblages governed by Ostrom's polycentric SES framework — heat islands shift phenology, intermediate disturbance maximises biodiversity, and green infrastructure delivers ecosystem services quantifiable in economic terms, making urban ecology the laboratory for coupled human-nature systems theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Urban ecology bridges ecology and social science by studying cities as coupled socio-ecological systems (SES) where human governance decisions and ecological processes co-evolve and are mutually determining.\nUrban ecosystems as novel assemblages: cities create ecological communities with no natural ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-green-infrastructure-urban-cooling-nonlinear-threshold"
      ],
      "communication_gap": "Ecology developed in natural systems (forests, grasslands, oceans) and urban ecology was not recognised as a legitimate sub-discipline until the late 1990s (McDonnell & Pickett 1990 first influential urban ecology synthesis). Social scientists (urban planners, sociologists, economists) built their urban models without ecological inputs. Ostrom's SES framework was developed from studies of fisheries and irrigation systems — its application to urban green infrastructure was not systematic until Elmqvist et al. (2013). Green infrastructure valuation remains contested because ecologists and economists use different units, time horizons, and uncertainty frameworks. The concept of \"nature-based solutions\" (EU policy language) has helped bridge planning and ecology but lacks rigorous ecological theory.\n",
      "translation_table": [
        {
          "field_a_term": "urban heat island intensity (ΔT_urban-rural)",
          "field_b_term": "phenological advancement (days per °C of warming)",
          "note": "UHI provides a natural experiment for climate change phenological effects"
        },
        {
          "field_a_term": "impervious surface cover (ISC %)",
          "field_b_term": "urbanisation gradient (ecological stress axis)",
          "note": "ISC is the standard physical measure of urbanisation used in ecology studies"
        },
        {
          "field_a_term": "species richness along urbanisation gradient",
          "field_b_term": "intermediate disturbance hypothesis (peaked diversity curve)",
          "note": "maximum diversity at intermediate ISC; high ISC selects for urban-adapted specialists"
        },
        {
          "field_a_term": "Ostrom's polycentric governance system",
          "field_b_term": "distributed urban green infrastructure management (community gardens, street tree stewardship)",
          "note": "polycentric governance outperforms central control for local ecological knowledge"
        },
        {
          "field_a_term": "ecosystem service valuation ($/year/hectare)",
          "field_b_term": "urban planning cost-benefit analysis for green vs. grey infrastructure",
          "note": "bridges ecological measurement to economic decision framework"
        },
        {
          "field_a_term": "luxury effect (vegetation cover correlated with household income)",
          "field_b_term": "environmental justice dimension of urban biodiversity distribution",
          "note": "connects ecological pattern to social inequality — biodiversity as equity issue"
        }
      ],
      "references": [
        {
          "doi": "10.1111/j.1461-0248.2008.01202.x",
          "note": "McDonnell & Hahs (2008) Ecol Lett 11:1290 — urbanisation gradient and ecological responses"
        },
        {
          "doi": "10.1126/science.1150195",
          "note": "Grimm et al. (2008) Science 319:756 — global change and the ecology of cities"
        },
        {
          "doi": "10.1126/science.1172133",
          "note": "Ostrom (2009) Science 325:419 — polycentric SES framework"
        },
        {
          "note": "Elmqvist et al. (2013) Urbanization, Biodiversity and Ecosystem Services. Springer, Dordrecht"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-ecology/b-urban-ecology-ses.yaml"
    },
    {
      "id": "b-hci-cognitive-load",
      "title": "Human-computer interaction bridges social science (cognitive psychology) and engineering: Fitts' law, Hick's law, and cognitive load theory provide quantitative design constraints translating working memory limits and motor control psychology into interface engineering specifications for software, devices, and workplaces.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Cognitive load theory (Sweller 1988): working memory has a capacity limit of approximately 7±2 chunks (Miller 1956) and can process 4±1 independent elements simultaneously in more recent estimates (Cowan 2001). Three load types: (1) Intrinsic load: inherent task complexity, determined by element int",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-fitts-law-bci-pointer-information-bandwidth-limit"
      ],
      "communication_gap": "Cognitive psychology (working memory research, dual-process theory) and engineering (UI design, software engineering) operate in largely separate academic communities. Academic HCI research (CHI conference) bridges these domains but is underfamiliar in both psychology departments (which rarely teach Fitts' law) and software engineering programs (which rarely teach cognitive psychology). Industry UX practice uses Fitts' law and cognitive load theory empirically but rarely engages with their information-theoretic foundations. Dark patterns research (a social science phenomenon of interface manipulation) is studied by behavioral economists and consumer protection researchers who rarely engage with the computational HCI literature on task modeling.\n",
      "translation_table": [
        {
          "field_a_term": "working memory capacity (7±2 chunks, Miller 1956)",
          "field_b_term": "UI chunking / grouping (visual hierarchy reduces simultaneous elements)",
          "note": "UI design directly maps to cognitive chunk count — good design keeps concurrent elements < 7"
        },
        {
          "field_a_term": "Fitts' law index of difficulty ID = log₂(2D/W) (bits)",
          "field_b_term": "motor channel capacity C = ID/MT (bits/second)",
          "note": "Fitts' law is Shannon's channel capacity theorem applied to the human motor system"
        },
        {
          "field_a_term": "Hick's law RT = b·log₂(n+1) (choice reaction time)",
          "field_b_term": "menu depth optimization (hierarchical menu: log depth beats flat breadth)",
          "note": "optimal menu structure minimizes RT = Hick time × depth = b·log₂(n^{1/k}) × k"
        },
        {
          "field_a_term": "cognitive schema (organized knowledge structure reducing working memory load)",
          "field_b_term": "UI affordance / convention (familiar interaction pattern requires no deliberate decoding)",
          "note": "conventional UI patterns (hamburger menu, scroll, pinch-to-zoom) offload working memory to long-term memory"
        },
        {
          "field_a_term": "GOMS/KLM task time prediction (expert performance model)",
          "field_b_term": "usability specification (measurable target: task time, error rate, learnability)",
          "note": "KLM provides pre-implementation performance specifications — testable engineering requirements"
        }
      ],
      "references": [
        {
          "doi": "10.1207/s15516709cog1202_4",
          "note": "Sweller (1988) Cognitive load during problem solving; Cogn Sci 12:257"
        },
        {
          "doi": "10.1037/h0043158",
          "note": "Miller (1956) The magical number seven, plus or minus two; Psychol Rev 63:81"
        },
        {
          "doi": "10.1037/h0055392",
          "note": "Fitts (1954) The information capacity of the human motor system in controlling the amplitude of movement; J Exp Psychol 47:381"
        },
        {
          "note": "Card, Moran & Newell (1983) The Psychology of Human-Computer Interaction. Lawrence Erlbaum Associates"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-engineering/b-hci-cognitive-load.yaml"
    },
    {
      "id": "b-human-factors-system-safety",
      "title": "James Reason's Swiss Cheese model and Perrow's Normal Accident Theory connect social-science analysis of human error and organizational factors to engineering system safety design, explaining why accidents occur in tightly coupled complex systems and how High Reliability Organizations prevent them through mindful organizing and Crew Resource Management.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "James Reason's Swiss Cheese model (1990) formalizes how accidents occur when holes in multiple defensive layers (technical barriers, procedures, supervision, organization) align — combining active failures (human errors: slips, lapses, mistakes, violations) with latent conditions (organizational fac",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-swiss-cheese-alignment-accident-prediction"
      ],
      "communication_gap": "Engineering safety literature (fault trees, FMEA, reliability analysis) and organizational psychology literature (HRO, organizational learning, error taxonomy) publish in entirely separate journals (Reliability Engineering & System Safety vs. Organization Science, Academy of Management Review). The quantitative probabilistic risk assessment (PRA) community in nuclear engineering rarely reads Weick or Perrow; organizational sociologists rarely read NUREG safety reports. Helmreich's CRM research (BMJ 2000) bridging aviation to surgery is an exception that proves the rule.\n",
      "translation_table": [
        {
          "field_a_term": "latent organizational conditions (Reason)",
          "field_b_term": "design flaws and systemic weaknesses in engineered systems",
          "note": "Latent conditions are the organizational equivalent of hidden engineering defects"
        },
        {
          "field_a_term": "tight coupling (Perrow)",
          "field_b_term": "systems where failures propagate faster than human response time",
          "note": "Tightly coupled systems have no slack to interrupt cascades — a sociotechnical property"
        },
        {
          "field_a_term": "interactive complexity (Perrow)",
          "field_b_term": "non-linear component interactions not visible in design documentation",
          "note": "Unexpected interactions between subsystems create failure modes not anticipated in FMEAs"
        },
        {
          "field_a_term": "mindful organizing (Weick)",
          "field_b_term": "safety management system (SMS) design in aviation/nuclear",
          "note": "HRO principles translate into engineering-grade safety management protocols"
        },
        {
          "field_a_term": "CRM error taxonomy (slips, lapses, mistakes, violations)",
          "field_b_term": "human factors integration in system design (MIL-STD-1472)",
          "note": "Human error taxonomy drives HFE (Human Factors Engineering) interface design"
        },
        {
          "field_a_term": "normalization of deviance (Vaughan — Challenger)",
          "field_b_term": "risk assessment creep under production pressure",
          "note": "Organizational sociology explains why engineers tolerate known risks under schedule pressure"
        }
      ],
      "references": [
        {
          "note": "Reason (1990) — Human Error (Swiss Cheese model)",
          "url": "https://www.cambridge.org/core/books/human-error/9681D63E9CB6A32C49FD0479BF3D5B78"
        },
        {
          "note": "Perrow (1984) — Normal Accidents: Living with High-Risk Technologies",
          "url": "https://press.princeton.edu/books/paperback/9780691004129/normal-accidents"
        },
        {
          "note": "Weick & Sutcliffe (2001) — Managing the Unexpected: High Reliability Organizations",
          "url": "https://www.wiley.com/en-us/Managing+the+Unexpected%3A+Sustained+Performance+in+a+Complex+World%2C+3rd+Edition-p-9781118862414"
        },
        {
          "note": "Helmreich (2000) — On error management: lessons from aviation (BMJ)",
          "doi": "10.1136/bmj.320.7237.781"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-engineering/b-human-factors-system-safety.yaml"
    },
    {
      "id": "b-social-contagion-complex-threshold",
      "title": "The spread of social behaviours (innovation adoption, social movements, voting) requires exposure to multiple independent contacts (complex contagion) unlike disease spread (simple contagion), described by threshold models where adoption occurs when the fraction of adopting neighbours exceeds an individual-specific threshold φ — a fundamentally different dynamic than standard SIR epidemics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Granovetter (1978) showed that riot or protest participation depends on threshold distributions in populations; the cascade dynamics depend critically on the shape of the threshold distribution φ_i. Centola (2010) demonstrated empirically that complex contagion (requiring multiple exposures) spreads",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-social-movement-cascade-clustered-network-advantage"
      ],
      "communication_gap": "Epidemiologists model disease spread with SIR (simple contagion) and apply the same framework to health behaviour change. Sociologists studying social movements use threshold models without connection to network science. Centola (2010) provided the empirical demonstration of the distinction, but adoption of the complex contagion framework in public health practice lags.\n",
      "translation_table": [
        {
          "field_a_term": "Individual adoption threshold φ_i (fraction of neighbours who must adopt first)",
          "field_b_term": "No equivalent in SIR: binary exposure probability in simple contagion",
          "note": "Complex contagion requires reinforcement; simple contagion requires single exposure"
        },
        {
          "field_a_term": "Clustered network (high clustering coefficient)",
          "field_b_term": "Network that provides reinforcing triangles for threshold-exceeding",
          "note": "Clustering helps complex contagion (redundant exposure) but hinders simple contagion (fewer bridges)"
        },
        {
          "field_a_term": "Global cascade condition (Watts 2002)",
          "field_b_term": "Epidemic threshold R₀ > 1 in simple contagion",
          "note": "Both are percolation-type thresholds; cascade condition involves threshold distribution, not just degree"
        },
        {
          "field_a_term": "Weak ties (Granovetter) as bridges for information",
          "field_b_term": "Long-range edges enabling simple contagion but hindering complex contagion",
          "note": "Weak ties are bridges that lack the clustering needed for threshold reinforcement in complex contagion"
        }
      ],
      "references": [
        {
          "doi": "10.1086/226707",
          "note": "Granovetter (1978) AJS – threshold models of collective behavior; cascade dynamics"
        },
        {
          "doi": "10.1126/science.1185231",
          "note": "Centola (2010) Science – the spread of behavior in an online social network experiment; complex contagion"
        },
        {
          "doi": "10.1103/PhysRevE.70.026116",
          "note": "Dodds & Watts (2004) PRE – universal behavior in a generalised model of contagion"
        },
        {
          "doi": "10.1073/pnas.082090499",
          "note": "Watts (2002) PNAS – a simple model of global cascades on random networks"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/social-science-epidemiology/b-social-contagion-complex-threshold.yaml"
    },
    {
      "id": "b-cultural-memes-shannon-entropy",
      "title": "Cultural transmission of memes across social networks obeys Shannon's noisy channel theorem — meme fidelity, cultural drift, and the homogenising effects of mass media are quantitatively described by channel capacity, noise models, and the source-channel coding theorem from information theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Shannon (1948) proved that any communication channel with noise can reliably transmit information at rates up to its channel capacity C = max_{p(x)} I(X;Y), and that error rates rise exponentially above this rate. Cultural transmission is a noisy communication channel: a cultural idea (meme) is enco",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-meme-channel-social-media-bias"
      ],
      "communication_gap": "Shannon's (1948) information theory was developed for electrical engineering; the application to cultural evolution was made informally by Dawkins (1976) with the meme concept but without formal information-theoretic tools. Henrich (2004) and Boyd & Richerson's cultural evolution models use population genetics mathematics (Wright-Fisher) rather than information theory. The information cascade literature (Bikhchandani, Welch) is in economics and does not reference Shannon. Social media researchers measure engagement and virality without connecting to channel capacity theory. The formal unification requires fluency in both information theory and cultural evolution — a rare combination.\n",
      "translation_table": [
        {
          "field_a_term": "Message source (transmitter)",
          "field_b_term": "Individual encoding and expressing a cultural variant",
          "note": "Encoding includes gesture, language, artifact, demonstration — each with different channel properties"
        },
        {
          "field_a_term": "Channel noise",
          "field_b_term": "Copying errors, misimitation, selective memory, motivated reasoning",
          "note": "Cultural drift (neutral evolution) = white-noise channel; cultural selection = signal-dependent noise"
        },
        {
          "field_a_term": "Channel capacity C",
          "field_b_term": "Maximum rate of faithful cultural transmission",
          "note": "Higher capacity channels (writing, video) preserve more variants; oral transmission has lower capacity"
        },
        {
          "field_a_term": "Error-correcting code",
          "field_b_term": "Cultural institution or ritual that enforces meme fidelity",
          "note": "Religious ritual, written scripture, legal codes = redundant encodings that reduce effective error rate"
        },
        {
          "field_a_term": "Burst error (correlated error block)",
          "field_b_term": "Information cascade — herd behavior erasing individual information",
          "note": "Bikhchandani et al. (1992) cascade; burst correcting codes = deliberate information injection to break herds"
        },
        {
          "field_a_term": "Channel bandwidth",
          "field_b_term": "Social media reach — number of people a single post can reach",
          "note": "Social media increases bandwidth dramatically while potentially degrading signal-to-noise ratio"
        },
        {
          "field_a_term": "Channel capacity limit (Shannon)",
          "field_b_term": "Dunbar's number — cognitive limit on stable social relationships",
          "note": "Neocortex size ~ bandwidth of social grooming channel; bandwidth sets maximum stable network degree"
        }
      ],
      "references": [
        {
          "note": "Dawkins (1976) The Selfish Gene (Oxford UP) — meme concept; replicator dynamics of cultural ideas",
          "url": "https://global.oup.com/academic/product/the-selfish-gene-9780198788607"
        },
        {
          "doi": "10.1002/j.1538-7305.1948.tb01338.x",
          "note": "Shannon (1948) Bell Syst Tech J 27:379 — mathematical theory of communication; channel capacity"
        },
        {
          "doi": "10.2307/772510",
          "note": "Henrich (2004) Am Antiq 69:197 — demography and cultural evolution; copying error = cultural drift"
        },
        {
          "note": "Sperber (1996) Explaining Culture (Blackwell) — epidemiology of representations; cognitive attractors",
          "url": "https://www.wiley.com/en-us/Explaining+Culture-p-9780631200451"
        },
        {
          "doi": "10.1086/261849",
          "note": "Bikhchandani, Hirshleifer & Welch (1992) J Polit Econ 100:992 — information cascades in herding behavior"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/social-science-information/b-cultural-memes-shannon-entropy.yaml"
    },
    {
      "id": "b-privacy-differential-privacy",
      "title": "Differential privacy provides an information-theoretic guarantee — epsilon bounds the log-likelihood ratio an adversary can achieve distinguishing any individual's data — creating a mathematically precise privacy-utility tradeoff that is dual to Neyman-Pearson hypothesis testing, bridging social privacy norms to information theory and statistical decision theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Differential privacy (Dwork et al. 2006): a mechanism M satisfies epsilon-DP if for any adjacent datasets D, D' differing by one record: P[M(D)∈S] ≤ exp(epsilon) × P[M(D')∈S]. This is a formal guarantee of individual privacy in statistical databases. The privacy-utility tradeoff: higher epsilon → le",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-differential-privacy-hypothesis-testing-connection"
      ],
      "communication_gap": "Privacy law and social science define privacy in terms of contextual integrity, norms, and legal concepts with no mathematical precision. Information theorists and cryptographers developed DP with mathematical rigor but limited connection to social concepts of harm and contextual expectation. Policy deployment (Census, Apple) required bridging these communities.\n",
      "translation_table": [
        {
          "field_a_term": "individual privacy (social norm)",
          "field_b_term": "epsilon-differential privacy (information-theoretic bound)",
          "note": "DP translates the vague social concept of privacy into a quantitative bound"
        },
        {
          "field_a_term": "sensitive database record",
          "field_b_term": "one-element difference in adjacent datasets D, D'",
          "note": "DP protects any single individual's contribution"
        },
        {
          "field_a_term": "privacy budget epsilon",
          "field_b_term": "log-likelihood ratio bound in hypothesis test",
          "note": "exp(epsilon) = maximum Neyman-Pearson distinguishing advantage"
        },
        {
          "field_a_term": "noise-adding mechanism (Laplace, Gaussian)",
          "field_b_term": "randomized test with controlled error probabilities",
          "note": "both achieve the desired epsilon by calibrating noise to sensitivity"
        },
        {
          "field_a_term": "privacy loss accounting (composition)",
          "field_b_term": "information-theoretic capacity bound under composition",
          "note": "sequential DP queries compose; total epsilon grows with number of queries"
        }
      ],
      "references": [
        {
          "note": "Dwork et al. (2006) ICALP — Calibrating Noise to Sensitivity in Private Data Analysis"
        },
        {
          "note": "Dwork & Roth (2014) Found Trends Theor Comput Sci — The Algorithmic Foundations of Differential Privacy"
        },
        {
          "note": "Vadhan (2017) in Tutorials on the Foundations of Cryptography"
        },
        {
          "note": "Apple (2017) Differential Privacy Overview — production deployment description"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-information-theory/b-privacy-differential-privacy.yaml"
    },
    {
      "id": "b-agent-based-modeling-emergent-institutions",
      "title": "Agent-based models with heterogeneous agents following local rules generate macro-level emergent institutions — Schelling segregation, Axelrod cooperation, and Sugarscape wealth distributions — unifying mathematical complexity theory with social science explanation of spontaneous institutional order.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Agent-based models (ABMs) are bottom-up simulations where N heterogeneous agents follow simple local behavioral rules, and macro-level social patterns emerge from their interactions without being programmed at the aggregate level. This bottom-up emergence is the mathematical formalization of what so",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-schelling-abm-segregation-threshold-real-world-preference-calibration"
      ],
      "communication_gap": "ABM research spans social science (JASSS — J Artificial Societies and Social Simulation), computer science (AAMAS), and mathematical physics (complexity journals). Social scientists who are suspicious of simulation relative to empirical work resist ABMs as \"just coding assumptions.\" Mathematicians developing rigorous mean-field theory for ABMs (interacting particle systems, McKean-Vlasov equations) rarely engage with social science ABM literature. The NetLogo platform is widely used in education but rarely discussed in physics literature despite implementing cellular automaton dynamics. Economists trained in DSGE models (representative agents) resist heterogeneous ABM approaches as lacking analytical tractability.\n",
      "translation_table": [
        {
          "field_a_term": "agent decision rule (behavioral micro-specification)",
          "field_b_term": "Hamiltonian or energy function in statistical mechanics"
        },
        {
          "field_a_term": "emergent macro-pattern (segregation, cooperation, inequality)",
          "field_b_term": "thermodynamic phase or collective order parameter"
        },
        {
          "field_a_term": "agent heterogeneity (different preference parameters)",
          "field_b_term": "quenched disorder in spin glass physics"
        },
        {
          "field_a_term": "tipping point in ABM (rapid phase transition in social outcome)",
          "field_b_term": "first-order phase transition (discontinuous change)"
        },
        {
          "field_a_term": "Schelling segregation threshold (1/3 same-type neighbors)",
          "field_b_term": "critical connectivity in percolation theory"
        },
        {
          "field_a_term": "Nash equilibrium in El Farol / minority game",
          "field_b_term": "maximum entropy distribution in statistical mechanics"
        },
        {
          "field_a_term": "NetLogo simulation platform (agent rules in patches/turtles)",
          "field_b_term": "cellular automaton or lattice model in computational physics"
        }
      ],
      "references": [
        {
          "note": "Axelrod (1984) The Evolution of Cooperation — Basic Books (tit-for-tat tournament)"
        },
        {
          "note": "Epstein & Axtell (1996) Growing Artificial Societies — MIT Press (Sugarscape)"
        },
        {
          "doi": "10.1080/0022250X.1971.9989794",
          "note": "Schelling (1971) J Math Sociol 1:143 — dynamic models of segregation"
        },
        {
          "doi": "10.2307/2117981",
          "note": "Arthur (1994) Am Econ Rev 84:406 — inductive reasoning and bounded rationality (El Farol bar problem)"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-mathematics/b-agent-based-modeling-emergent-institutions.yaml"
    },
    {
      "id": "b-algorithmic-fairness-social-choice",
      "title": "Formal impossibility theorems in algorithmic fairness — showing that demographic parity, equalized odds, and calibration cannot simultaneously hold when base rates differ — are mathematical analogs of Arrow's impossibility theorem in social choice theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Algorithmic fairness seeks criteria that trained classifiers should satisfy to avoid discrimination. Three prominent criteria conflict when base rates differ across groups: (1) demographic parity P(Ŷ=1|A=0) = P(Ŷ=1|A=1); (2) equalized odds P(Ŷ=1|Y=y,A=a) = const across groups; (3) calibration P(Y=1|",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-causal-fairness-resolves-impossibility-tradeoffs"
      ],
      "communication_gap": "Machine learning researchers rarely engage with formal social choice theory; political philosophers and social choice theorists rarely read NeurIPS or FAccT. The legal concept of disparate impact (US law) predates the mathematical fairness impossibility theorems by 30 years but is not recognized by ML researchers as an Arrow-like constraint. Policy discussions on AI fairness rarely cite the impossibility theorems, leading to demands for solutions that are mathematically unachievable.\n",
      "translation_table": [
        {
          "field_a_term": "Arrow's impossibility theorem (social choice)",
          "field_b_term": "Chouldechova-Kleinberg fairness impossibility",
          "note": "Both show that a collection of seemingly reasonable axioms is mutually inconsistent"
        },
        {
          "field_a_term": "voting rule / aggregation mechanism",
          "field_b_term": "classifier / risk score algorithm",
          "note": "Both aggregate individual-level information into a social or individual decision"
        },
        {
          "field_a_term": "Pareto efficiency (no one can be better off without making someone worse off)",
          "field_b_term": "accuracy-fairness Pareto frontier",
          "note": "Fairness constraints push classifiers off the Pareto frontier of raw accuracy"
        },
        {
          "field_a_term": "independence of irrelevant alternatives",
          "field_b_term": "path-independence of fairness measure to feature set",
          "note": "Adding irrelevant protected features can change fairness metric values — a form of IIA failure"
        },
        {
          "field_a_term": "Nash bargaining solution (maximize product of utilities)",
          "field_b_term": "fairness-accuracy trade-off optimization with Pareto-weighted objective",
          "note": "Nash bargaining applied to fairness-accuracy frontier selects a specific operating point"
        }
      ],
      "references": [
        {
          "doi": "10.1089/big.2016.0047",
          "note": "Chouldechova (2017) — Fair prediction with disparate impact, Big Data 5:153"
        },
        {
          "doi": "10.4230/LIPIcs.ITCS.2017.43",
          "note": "Kleinberg, Mullainathan & Raghavan (2017) — Inherent trade-offs in algorithmic fairness, ITCS"
        },
        {
          "note": "Kusner et al. (2017) — Counterfactual fairness, NeurIPS 30"
        },
        {
          "note": "Barocas, Hardt & Narayanan (2019) Fairness and Machine Learning, fairmlbook.org"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-mathematics/b-algorithmic-fairness-social-choice.yaml"
    },
    {
      "id": "b-auction-theory-mechanism-design",
      "title": "Mechanism design reverses game theory — designing incentive structures so that rational self-interest produces socially optimal outcomes",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Vickrey's second-price auction (1961) proves that bidding true valuation is a dominant strategy — truth-telling is optimal regardless of others' strategies. The revenue equivalence theorem (Myerson 1981) establishes that all standard auctions (first-price, second-price, English, Dutch) yield identic",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-mechanism-design-spectrum-auctions-efficiency"
      ],
      "communication_gap": "Auction theory is taught in economics programs; operations research teaches optimization of allocation problems. The unification under mechanism design (Nobel 2007) is known across social science but the mathematical depth of the revelation principle and virtual valuations is rarely taught outside specialized graduate courses in economic theory.\n",
      "translation_table": [
        {
          "field_a_term": "revelation principle",
          "field_b_term": "without loss of generality consider only direct truthful mechanisms",
          "note": "Any equilibrium of any mechanism can be replicated by a truthful direct mechanism"
        },
        {
          "field_a_term": "dominant strategy incentive compatibility (DSIC)",
          "field_b_term": "truth-telling is optimal regardless of others' reports",
          "note": "Vickrey-Clarke-Groves mechanisms achieve DSIC for social welfare maximization"
        },
        {
          "field_a_term": "virtual valuation phi(v) = v - (1-F(v))/f(v)",
          "field_b_term": "marginal revenue in Myerson's optimal auction",
          "note": "Optimal mechanism sells to bidder with highest non-negative virtual valuation"
        },
        {
          "field_a_term": "Bayesian Nash equilibrium",
          "field_b_term": "equilibrium in which each agent best-responds to beliefs about others",
          "note": "First-price auction has a unique symmetric BNE with shading b(v) = E[max others | others < v]"
        }
      ],
      "references": [
        {
          "doi": "10.1111/j.1540-6261.1961.tb04131.x",
          "note": "Vickrey, W. (1961). Counterspeculation, auctions, and competitive sealed tenders. J Finance 16:8."
        },
        {
          "doi": "10.1287/moor.6.1.58",
          "note": "Myerson, R.B. (1981). Optimal auction design. Math Oper Res 6:58."
        },
        {
          "note": "Klemperer, P. (2004). Auctions: Theory and Practice. Princeton University Press."
        },
        {
          "note": "Milgrom, P. (2004). Putting Auction Theory to Work. Cambridge University Press."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-mathematics/b-auction-theory-mechanism-design.yaml"
    },
    {
      "id": "b-bargaining-theory-negotiation",
      "title": "Nash and Rubinstein bargaining theory bridges mathematics and social science: axiomatic and strategic foundations yield unique equilibrium solutions to negotiation that apply to labor negotiations, climate burden sharing, divorce settlements, and M&A deals.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Bargaining theory provides mathematical foundations for real-world negotiation. Nash (1950) axiomatic solution: given a feasible set S of utility pairs and disagreement point d = (d₁, d₂) (utilities if no agreement), the Nash bargaining solution maximizes (u₁-d₁)(u₂-d₂) over S — the unique solution ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-outside-option-effect-causal-wage-effect"
      ],
      "communication_gap": "Economists know bargaining theory but rarely engage with the experimental psychology of negotiation (anchoring, reactive devaluation, loss aversion — which violate the rational actor assumptions). Psychologists and negotiation practitioners rarely engage with the formal theory. Labor economists who study wages use bargaining models but often treat the disagreement point empirically rather than theoretically.\n",
      "translation_table": [
        {
          "field_a_term": "Nash bargaining solution (maximize (u₁-d₁)(u₂-d₂))",
          "field_b_term": "unique efficient split of the surplus above the disagreement outcome",
          "note": "four axioms uniquely characterize the solution; no explicit mechanism needed"
        },
        {
          "field_a_term": "disagreement point d = (d₁, d₂)",
          "field_b_term": "outside options / threat points / BATNA",
          "note": "BATNA (best alternative to negotiated agreement) is d; improving BATNA improves outcome"
        },
        {
          "field_a_term": "Rubinstein discount factor δ (patience)",
          "field_b_term": "time cost of negotiation; impatient parties accept worse deals sooner",
          "note": "higher δ (more patient) → more bargaining power in alternating offers"
        },
        {
          "field_a_term": "Kalai-Smorodinsky monotonicity axiom",
          "field_b_term": "fairness norm that proportional gains are maintained across expanded sets",
          "note": "Nash fails monotonicity; KS satisfies it; different social norms lead to different solutions"
        },
        {
          "field_a_term": "IIA (independence of irrelevant alternatives)",
          "field_b_term": "irrelevant alternatives should not change the bargaining outcome",
          "note": "Nash satisfies IIA; violating it leads to strategic manipulation via fake options"
        }
      ],
      "references": [
        {
          "doi": "10.2307/1907266",
          "note": "Nash (1950) The bargaining problem; Econometrica 18:155"
        },
        {
          "doi": "10.2307/1912531",
          "note": "Rubinstein (1982) Perfect equilibrium in a bargaining model; Econometrica 50:97"
        },
        {
          "doi": "10.2307/1914187",
          "note": "Kalai & Smorodinsky (1975) Other solutions to Nash's bargaining problem; Econometrica 43:513"
        },
        {
          "note": "Osborne & Rubinstein (1990) Bargaining and Markets; Academic Press"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/social-science-mathematics/b-bargaining-theory-negotiation.yaml"
    },
    {
      "id": "b-bayesian-networks-causal-reasoning",
      "title": "Bayesian Networks and Causal Reasoning — directed graphical models, d-separation, and Pearl's do-calculus formalise the distinction between correlation and causation",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "A Bayesian network (BN) is a directed acyclic graph (DAG) in which nodes represent random variables and edges encode conditional dependencies. The joint distribution factorises as P(X₁,…,Xₙ) = ∏P(Xᵢ|parents(Xᵢ)), enabling efficient inference in high-dimensional spaces. The d-separation criterion (Pe",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Probabilistic graphical models were developed in the AI/computer science community (Pearl, Lauritzen) while causal inference methodology developed separately in epidemiology (Greenland, Robins) and econometrics (Heckman, Angrist). These traditions use different notation and emphasise different identifiability conditions. Pearl's (2000) unification in Causality was a landmark but remains incompletely adopted across social science disciplines, partly due to resistance to explicit causal DAG specification.\n",
      "translation_table": [
        {
          "field_a_term": "directed acyclic graph (DAG)",
          "field_b_term": "causal diagram encoding researcher assumptions about data-generating process",
          "note": "DAGs in social science encode the investigator's causal model; topology determines identifiability"
        },
        {
          "field_a_term": "do-calculus intervention do(X=x)",
          "field_b_term": "randomised controlled trial or policy intervention",
          "note": "do(X=x) emulates an RCT by breaking confounding paths; observational P(Y|X=x) ≠ P(Y|do(X=x))"
        },
        {
          "field_a_term": "backdoor criterion",
          "field_b_term": "identification of sufficient confounders to control for in regression",
          "note": "A set Z satisfies the backdoor criterion if it blocks all back-door paths from X to Y"
        },
        {
          "field_a_term": "d-separation (conditional independence)",
          "field_b_term": "partial correlation equals zero after controlling for a set of variables",
          "note": "Graphical test replaces algebraic independence checks; faithfulness assumption makes this empirically testable"
        },
        {
          "field_a_term": "counterfactual P(Y_x=y|evidence)",
          "field_b_term": "individual treatment effect — would this patient have recovered on drug X?",
          "note": "Counterfactuals require structural causal model (SCM) with noise variables; non-parametric identification is harder"
        },
        {
          "field_a_term": "collider bias (opening a path by conditioning on a collider)",
          "field_b_term": "selection bias or Berkson's paradox in observational studies",
          "note": "Conditioning on a collider induces spurious correlation — a major source of bias in clinical and social research"
        }
      ],
      "references": [
        {
          "note": "Pearl (1988) Probabilistic Reasoning in Intelligent Systems — BNs and d-separation"
        },
        {
          "note": "Pearl (2000) Causality — do-calculus, counterfactuals, and causal inference"
        },
        {
          "note": "Spirtes, Glymour & Scheines (2000) Causation, Prediction, Search — PC algorithm"
        },
        {
          "note": "Peters, Janzing & Schölkopf (2017) Elements of Causal Inference — modern treatment"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-mathematics/b-bayesian-networks-causal-reasoning.yaml"
    },
    {
      "id": "b-condorcet-paradox-voting-cycles",
      "title": "The Condorcet paradox demonstrates that majority voting on three or more alternatives can produce cyclic collective preferences (A beats B, B beats C, C beats A) even when all individual preferences are transitive — a mathematical impossibility result underlying Arrow's theorem and spatial voting theory, with the median voter theorem providing the single-peaked exception.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Condorcet (1785) showed that pairwise majority voting over three alternatives A, B, C with three voter types (A>B>C, B>C>A, C>A>B) produces majority cycles: A beats B by 2-1, B beats C by 2-1, C beats A by 2-1. This is not a special case: with n>2 alternatives and m voters with random uniform prefer",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-collective-action-ostrom-design-principles-v2"
      ],
      "communication_gap": "Condorcet (1785) wrote in French and was largely ignored by English-language political scientists until the 20th century. Arrow (1951) rediscovered the impossibility in an economics framework. Black (1948) independently discovered the median voter theorem and published in the Journal of Political Economy. Social choice theory developed primarily in economics departments (Harvard, Rochester) while political scientists studied voting behavior empirically with minimal awareness of the mathematical impossibility results until the 1970s.\n",
      "translation_table": [
        {
          "field_a_term": "Condorcet winner (beats all others in pairwise majority vote)",
          "field_b_term": "median voter bliss point (in single-peaked preferences)",
          "note": "when single-peakedness holds, the Condorcet winner = median voter position"
        },
        {
          "field_a_term": "Condorcet cycle (A>B>C>A majority preferences)",
          "field_b_term": "intransitive tournament graph (a directed graph with no dominant vertex)",
          "note": "the majority-preference relation corresponds to a tournament graph; cycles are non-transitive tournaments"
        },
        {
          "field_a_term": "single-peaked preferences (one-dimensional issue space)",
          "field_b_term": "unimodal probability distribution on the real line",
          "note": "single-peakedness is the social choice analog of unimodality; guarantees median optimality"
        },
        {
          "field_a_term": "Arrow's impossibility (no ideal SWF)",
          "field_b_term": "no maximal element in a cyclic preference tournament",
          "note": "cyclic preferences mean no choice rule can satisfy all fairness axioms simultaneously"
        }
      ],
      "references": [
        {
          "note": "Condorcet (1785) — Essai sur l'application de l'analyse à la probabilité des décisions rendues à la pluralité des voix"
        },
        {
          "doi": "10.1086/256963",
          "note": "Black (1948) — On the rationale of group decision-making; J Polit Econ 56:23"
        },
        {
          "note": "Arrow (1951) — Social Choice and Individual Values; Yale University Press"
        },
        {
          "note": "Downs (1957) — An Economic Theory of Democracy; Harper & Row"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-mathematics/b-condorcet-paradox-voting-cycles.yaml"
    },
    {
      "id": "b-network-centrality-social-influence",
      "title": "Network centrality measures — degree, betweenness, eigenvector, and Katz centrality — derived from spectral properties of the adjacency matrix, provide a unified mathematical framework quantifying social influence, predicting epidemiological superspreaders, economic wage inequality in oligopoly, and information diffusion in social networks.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Social influence in a network G = (V, E) with adjacency matrix A is captured by multiple centrality measures, all derivable from A's spectral decomposition. Degree centrality: k_i = Σⱼ Aᵢⱼ (direct connections). Betweenness centrality: B_i = Σ_{s≠t} σ_{st}(i)/σ_{st} where σ_{st}(i) is the number of s",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-eigenvector-centrality-superspreader-epidemic-prediction"
      ],
      "communication_gap": "Sociologists invented betweenness centrality (Freeman 1977) and published in Sociometry; mathematicians developed spectral graph theory in parallel in discrete mathematics journals. Economists (Bonacich, Katz) developed network centrality independently in economics journals. The synthesis — that all these are eigenvector variants — is well-known to network scientists but rarely presented in standard sociology or economics curricula. PageRank's connection to sociological influence theory is rarely discussed outside network science.\n",
      "translation_table": [
        {
          "field_a_term": "adjacency matrix A of social network",
          "field_b_term": "who-influences-whom relationship in a community",
          "note": "A_ij = 1 if person j can directly influence person i; A encodes all social structure"
        },
        {
          "field_a_term": "principal eigenvalue λ₁(A) of adjacency matrix",
          "field_b_term": "epidemic threshold — R₀ = β/γ must exceed 1/λ₁ for epidemic",
          "note": "Spectral gap λ₁ - λ₂ determines robustness to node removal"
        },
        {
          "field_a_term": "eigenvector centrality (principal eigenvector)",
          "field_b_term": "Bonacich power — equilibrium payoff in economic network games",
          "note": "Formally identical: both are principal eigenvector of the weighted adjacency matrix"
        },
        {
          "field_a_term": "betweenness centrality B_i",
          "field_b_term": "information broker or structural hole position in sociology",
          "note": "Burt's (1992) structural holes correspond to high-betweenness nodes"
        },
        {
          "field_a_term": "PageRank damping factor α",
          "field_b_term": "probability that influence propagates vs. attenuates",
          "note": "α = 0.85 (Google default) has no sociological derivation; biology suggests α varies by context"
        }
      ],
      "references": [
        {
          "doi": "10.2307/3033543",
          "note": "Freeman (1977) A set of measures of centrality based on betweenness. Sociometry 40:35"
        },
        {
          "doi": "10.2307/2785990",
          "note": "Katz (1953) A new status index derived from sociometric analysis. Sociometry 16:39"
        },
        {
          "doi": "10.1086/228631",
          "note": "Bonacich (1987) Power and centrality: a family of measures. Am J Sociol 92:1170"
        },
        {
          "doi": "10.1016/S0169-7552(98)00110-X",
          "note": "Brin & Page (1998) The anatomy of a large-scale hypertextual web search engine. Comput Netw ISDN Syst 30:107"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-mathematics/b-network-centrality-social-influence.yaml"
    },
    {
      "id": "b-prediction-markets-information-aggregation",
      "title": "Hanson's logarithmic market scoring rule is a proper scoring rule that implements Bayesian belief aggregation as a market mechanism — linking information theory to political economy",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Prediction markets are a social mechanism that converts dispersed private information into publicly observable probabilities. Arrow-Debreu contingent claims theory proves that in complete markets, the equilibrium price of an asset paying $1 if event X occurs equals the market-consensus probability P",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-lmsr-automated-market-maker-dominates-polls-epistemic-accuracy"
      ],
      "communication_gap": "Market design economists publish in American Economic Review and Journal of Political Economy; statisticians studying proper scoring rules publish in Journal of the Royal Statistical Society and Annals of Statistics. Machine learning researchers rediscovered LMSR-equivalent loss functions (cross-entropy, softmax) independently. These three communities have largely parallel literatures on the same mathematical structure.\n",
      "translation_table": [
        {
          "field_a_term": "Arrow-Debreu contingent claim price (economics)",
          "field_b_term": "posterior probability (Bayesian statistics)",
          "note": "In complete markets with rational agents, asset price = probability — market as Bayesian aggregator"
        },
        {
          "field_a_term": "LMSR cost function C(q) (mechanism design)",
          "field_b_term": "log partition function (statistical physics / ML)",
          "note": "C(q) = b·log(Σexp(qᵢ/b)) is exactly the free energy / log-sum-exp from statistical mechanics"
        },
        {
          "field_a_term": "softmax pricing (prediction markets)",
          "field_b_term": "Boltzmann distribution (statistical physics)",
          "note": "LMSR prices are a Boltzmann distribution over outcomes with b playing the role of temperature"
        },
        {
          "field_a_term": "proper scoring rule (statistics)",
          "field_b_term": "incentive-compatible mechanism (mechanism design)",
          "note": "A mechanism where truthful revelation is a dominant strategy — connects scoring rules to revelation principle"
        },
        {
          "field_a_term": "no-trade theorem (Milgrom-Stokey)",
          "field_b_term": "efficient market hypothesis (finance)",
          "note": "Both predict that no profits can be made from commonly known information — but both are violated empirically"
        }
      ],
      "references": [
        {
          "doi": "10.2307/2295952",
          "note": "Arrow (1964). The role of securities in the optimal allocation of risk-bearing. Rev Econ Stud 31:91."
        },
        {
          "note": "Hanson (2003). Combinatorial information market design. Inf Syst Front 5:107."
        },
        {
          "doi": "10.1257/0895330041371321",
          "note": "Wolfers & Zitzewitz (2004). Prediction markets. J Econ Perspect 18:107."
        },
        {
          "doi": "10.1016/0022-0531(82)90046-1",
          "note": "Milgrom & Stokey (1982). Information, trade and common knowledge. J Econ Theory 26:17."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-mathematics/b-prediction-markets-information-aggregation.yaml"
    },
    {
      "id": "b-social-capital-network-science",
      "title": "Granovetter's \"strength of weak ties\" and Burt's structural holes in social capital theory are precisely identified with bridge edges and high-betweenness-centrality nodes in graph theory: social capital reduces to computable network topology, and the Erdős-Rényi giant component transition predicts the critical network density for information to spread society-wide.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Social capital theory (Granovetter 1973, Burt 1992, Coleman 1988) asserts that an individual's social position determines their access to information, resources, and opportunities. Network science provides exact mathematical translations of these sociological concepts:\n1. WEAK TIES = BRIDGE EDGES: G",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-social-capital-eigenvector-centrality-income"
      ],
      "communication_gap": "Granovetter (1973) and Burt (1992) developed social capital theory in the sociology literature using graph-theoretic language informally but without formal graph theory. Erdős and Rényi worked in pure mathematics completely disconnected from social science. Barabási & Albert (1999) explicitly noted the social network application of scale-free graphs, but the sociological literature absorbed network science tools slowly. The precise algebraic equivalences between sociological concepts (closure, structural holes, weak ties) and graph-theoretic quantities (clustering coefficient, betweenness, bridge edges) are now textbook material in computational social science but remain unknown to most empirical sociologists who use social capital as a qualitative concept.\n",
      "translation_table": [
        {
          "field_a_term": "Weak tie (acquaintance relationship)",
          "field_b_term": "Bridge edge or local bridge in graph theory",
          "note": "Edges with high edge betweenness are almost always weak ties; bridges cross community boundaries"
        },
        {
          "field_a_term": "Structural hole (gap spanned by broker)",
          "field_b_term": "High vertex betweenness centrality B(v)",
          "note": "A node spanning structural holes has many shortest paths passing through it"
        },
        {
          "field_a_term": "Burt's network constraint C_i",
          "field_b_term": "Inverse of local betweenness / redundancy of contacts",
          "note": "High constraint = contacts are densely interconnected = no structural holes = low social capital"
        },
        {
          "field_a_term": "Social capital stock (Coleman)",
          "field_b_term": "Eigenvector centrality / PageRank",
          "note": "Influence propagates multiplicatively through the network; eigenvector centrality captures recursive influence"
        },
        {
          "field_a_term": "Critical network density for information spread",
          "field_b_term": "Erdős-Rényi giant component threshold p_c = 1/n (⟨k⟩ = 1)",
          "note": "Below threshold: fragmented society; above: connected giant component enables global diffusion"
        },
        {
          "field_a_term": "Social closure (Coleman 1988)",
          "field_b_term": "Clustering coefficient C_v",
          "note": "Dense cliques = high clustering = social closure; facilitates trust and norm enforcement"
        },
        {
          "field_a_term": "Scale-free social network (hubs, influencers)",
          "field_b_term": "Power-law degree distribution P(k) ∝ k^{-γ}",
          "note": "Preferential attachment (the rich get richer) generates hubs analogous to celebrity influencers"
        }
      ],
      "references": [
        {
          "doi": "10.1086/225469",
          "note": "Granovetter (1973) Am J Soc 78:1360 — The strength of weak ties; foundational social capital theory"
        },
        {
          "note": "Burt (1992) Structural Holes: The Social Structure of Competition (Harvard UP) — structural holes and competitive advantage",
          "url": "https://www.hup.harvard.edu/catalog.php?isbn=9780674843714"
        },
        {
          "doi": "10.1126/science.286.5439.509",
          "note": "Barabási & Albert (1999) Science 286:509 — emergence of scaling in random networks; preferential attachment"
        },
        {
          "note": "Erdős & Rényi (1960) Publ Math Inst Hung Acad Sci 5:17 — on the evolution of random graphs; giant component phase transition",
          "url": "https://www.renyi.hu/~p_erdos/1960-10.pdf"
        },
        {
          "doi": "10.1038/30918",
          "note": "Watts & Strogatz (1998) Nature 393:440 — collective dynamics of small-world networks; clustering coefficient"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/social-science-mathematics/b-social-capital-network-science.yaml"
    },
    {
      "id": "b-social-mobility-markov-chain-transition-matrix",
      "title": "Social mobility across income or occupational classes is modeled as a Markov chain with a transition matrix P_{ij} representing the probability of moving from class i to class j across generations; the Markov eigenvalue structure determines long-run mobility rates, steady-state distributions, and whether a society converges to meritocracy or reproduces inequality.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Let x_t be the class distribution vector at generation t; then x_{t+1} = P·x_t where P is a row-stochastic transition matrix (P_{ij} ≥ 0, ∑_j P_{ij} = 1). The long-run (steady-state) distribution π satisfies π = π·P (left eigenvector for eigenvalue λ=1). The second eigenvalue λ₂ determines convergen",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-wealth-concentration-slows-markov-mixing-time-poverty-traps"
      ],
      "communication_gap": "Sociologists and economists measuring social mobility use transition matrices empirically but often interpret them without Markov chain theory; mathematicians studying Markov mixing times and ergodic theory rarely apply their results to sociological data; the explicit spectral-gap analysis of real mobility matrices is a productive frontier that remains underexplored.\n",
      "translation_table": [
        {
          "field_a_term": "intergenerational mobility rate (sociology)",
          "field_b_term": "spectral gap 1-λ₂ of transition matrix (mathematics)",
          "note": "Faster societal mixing corresponds to larger spectral gap; slower mobility to λ₂ → 1"
        },
        {
          "field_a_term": "class distribution across generations (sociology)",
          "field_b_term": "iterated matrix-vector product x_t = P^t·x_0 (mathematics)",
          "note": "Class shares evolve by repeated application of the transition matrix"
        },
        {
          "field_a_term": "steady-state class distribution (sociology)",
          "field_b_term": "stationary distribution π of Markov chain (mathematics)",
          "note": "If P is ergodic, the class distribution converges to π regardless of starting state"
        },
        {
          "field_a_term": "intergenerational income elasticity IGE (sociology)",
          "field_b_term": "rate of convergence to stationary distribution (mathematics)",
          "note": "High IGE means slow convergence; IGE ≈ 0.5 in US, 0.2 in Scandinavia"
        }
      ],
      "references": [
        {
          "doi": "10.1257/aer.104.5.141",
          "note": "Chetty et al. (2014) - Where is the Land of Opportunity? (empirical US mobility matrices)"
        },
        {
          "doi": "10.1093/restud/rds048",
          "note": "Blanden et al. (2013) - intergenerational mobility and the Markov property"
        },
        {
          "doi": "10.1093/acprof:oso/9780195049015.001.0001",
          "note": "Shorrocks (1978) - the measurement of mobility (mathematical mobility indices)"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/social-science-mathematics/b-social-mobility-markov-chain-transition-matrix.yaml"
    },
    {
      "id": "b-structural-holes-brokerage",
      "title": "Burt's structural holes bridge social science and mathematics: brokers who span disconnected network clusters gain information and control advantages quantified by the constraint measure C_i — formalizing Granovetter's weak tie strength and Coleman's social capital closure in a unified network theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Structural hole theory (Burt 1992) provides a mathematical theory of brokerage advantage. A structural hole exists between two groups when there is no direct connection between them — the broker who bridges both groups occupies an advantageous position. Constraint measure C_i = Σⱼ(pᵢⱼ + Σ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-brokerage-advantage-diminishes-with-organizational-transparency"
      ],
      "communication_gap": "Sociologists who developed structural hole theory (Burt) rarely engage with the graph theory literature on betweenness centrality and bridge detection. Computer scientists who develop network centrality algorithms rarely engage with the sociological interpretation and organizational implications of structural holes. Granovetter's weak tie paper (one of the most cited sociology papers) is rarely read by network scientists who use betweenness centrality as a purely mathematical measure.\n",
      "translation_table": [
        {
          "field_a_term": "structural hole (no path between two clusters except through broker)",
          "field_b_term": "graph-theoretic bridge (edge whose removal disconnects components)",
          "note": "structural holes are not literally absent ties, but redundant paths; Burt's definition is softer"
        },
        {
          "field_a_term": "constraint C_i (Burt 1992)",
          "field_b_term": "measure of ego network redundancy; C_i = 1 for clique membership",
          "note": "low constraint → high structural holes → brokerage advantage; measurable from sociomatrix"
        },
        {
          "field_a_term": "weak tie (infrequent contact with low emotional intensity)",
          "field_b_term": "bridge edge in graph (connecting otherwise disconnected components)",
          "note": "weak ties are probabilistically more likely to be bridges — but not necessarily; the strength of weak ties is statistical"
        },
        {
          "field_a_term": "betweenness centrality BC(v)",
          "field_b_term": "fraction of geodesic paths (shortest paths) passing through v",
          "note": "high BC = broker; BC computed in O(VE) time (Brandes 2001); high BC individuals tend to have low constraint"
        },
        {
          "field_a_term": "network closure (Coleman social capital)",
          "field_b_term": "clustering coefficient C = triangles / possible triangles around ego",
          "note": "dense triangles enable norm enforcement; bonding social capital; trades off with brokerage"
        }
      ],
      "references": [
        {
          "doi": "10.1086/225469",
          "note": "Granovetter (1973) The strength of weak ties; Am J Sociol 78:1360"
        },
        {
          "note": "Burt (1992) Structural Holes — The Social Structure of Competition; Harvard University Press"
        },
        {
          "doi": "10.1086/421787",
          "note": "Burt (2004) Structural holes and good ideas; Am J Sociol 110:349"
        },
        {
          "doi": "10.1086/228943",
          "note": "Coleman (1988) Social capital in the creation of human capital; Am J Sociol 94:S95"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/social-science-mathematics/b-structural-holes-brokerage.yaml"
    },
    {
      "id": "b-voter-model-consensus",
      "title": "The voter model (Clifford & Sudbury 1973) — each agent copies a random neighbor's opinion — maps opinion dynamics onto random walk theory: consensus in d≤2 dimensions, persistent diversity in d>2, T∝N·lnN in 2D, and echo-chamber polarization as network-structured metastable trapping.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The voter model is the simplest model of social influence and opinion dynamics, yet it reduces exactly to classical problems in probability theory and statistical physics.\n1. Voter model definition. N agents on a graph, each holding opinion ±1.\n   At each timestep, a random agent i copies the opinio",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-network-community-structure-drives-polarization"
      ],
      "communication_gap": "Sociologists studying opinion dynamics and statistical physicists studying the voter model are largely disconnected. The model is well-known in mathematical ecology (it originated as a species competition model), in statistical physics (as a non-equilibrium stochastic model), and increasingly in computational social science, but the exact equivalence between the voter model, the Moran process, and random walks is rarely appreciated in all three communities simultaneously.\n",
      "translation_table": [
        {
          "field_a_term": "opinion ±1 of agent i (voter model)",
          "field_b_term": "allele A/a at locus of individual i (population genetics / Moran)",
          "note": "Exact mathematical equivalence; voter model IS the Moran process"
        },
        {
          "field_a_term": "social influence (copying neighbor's opinion)",
          "field_b_term": "genetic drift (random replacement by neighboring lineage)",
          "note": "Both processes are pure random copying without selection pressure"
        },
        {
          "field_a_term": "consensus state (all +1 or all −1) in voter model",
          "field_b_term": "fixation of one allele in Moran process (genetics)",
          "note": "Absorbing states are identical; fixation probability = initial frequency in both"
        },
        {
          "field_a_term": "domain wall (interface between +1 and −1 clusters)",
          "field_b_term": "allele frequency gradient / clinal variation (genetics)",
          "note": "Both represent the boundary between differing discrete states"
        },
        {
          "field_a_term": "consensus time T ∝ N ln N on 2D lattice (voter model)",
          "field_b_term": "fixation time of neutral allele T ∝ N ln N on 2D structured population",
          "note": "Same mathematical result from same random walk calculation"
        },
        {
          "field_a_term": "echo chamber / polarized metastable state (social science)",
          "field_b_term": "long-lived metastable state in community-structured network (physics)",
          "note": "Community structure creates exponentially long-lived polarized states"
        }
      ],
      "references": [
        {
          "doi": "10.2307/2334957",
          "note": "Clifford & Sudbury (1973) Biometrika 60:581 — a model for spatial conflict"
        },
        {
          "doi": "10.1103/PhysRevLett.94.178701",
          "note": "Sood & Redner (2005) Phys Rev Lett 94:178701 — voter model on heterogeneous graphs"
        },
        {
          "doi": "10.1088/1751-8113/41/2/025001",
          "note": "Sood et al. (2008) J Phys A 41:025001 — voter models on heterogeneous graphs"
        },
        {
          "doi": "10.1103/RevModPhys.81.591",
          "note": "Castellano et al. (2009) Rev Mod Phys 81:591 — statistical physics of social dynamics"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-mathematics/b-voter-model-consensus.yaml"
    },
    {
      "id": "b-voting-theory-social-choice",
      "title": "Arrow's impossibility theorem — no voting system with ≥3 candidates satisfies Pareto efficiency, independence of irrelevant alternatives, and non-dictatorship simultaneously — and the Gibbard-Satterthwaite theorem that any reasonable voting rule is strategically manipulable, transform political science questions about democratic design into solved theorems in social choice mathematics.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Arrow's impossibility theorem (1951, Nobel Prize in Economics 1972) is one of the most striking results in all of social science: it proves, by rigorous mathematical argument, that no voting system for ≥3 candidates can simultaneously satisfy three apparently minimal conditions: (P) Pareto efficienc",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-approval-voting-reduces-strategic-manipulation-vs-plurality"
      ],
      "communication_gap": "Arrow's theorem is widely known in economics but poorly taught in political science, despite being directly relevant to electoral system design. The Gibbard-Satterthwaite theorem is almost unknown outside economic theory, despite its direct implication that strategic voting is unavoidable. Voting system reformers (advocates of ranked-choice voting, approval voting, etc.) often argue for their preferred system without knowing the mathematical constraints. Conversely, mathematical social choice theorists rarely engage with empirical electoral data or practical reform debates.\n",
      "translation_table": [
        {
          "field_a_term": "Pareto efficiency (welfare economics)",
          "field_b_term": "Universal dominance criterion for collective preference",
          "note": "If everyone agrees, society agrees — the least controversial democratic condition"
        },
        {
          "field_a_term": "Independence of irrelevant alternatives (Arrow condition)",
          "field_b_term": "Pairwise binary consistency of social ranking",
          "note": "The condition most often sacrificed in practice — violated by Borda count"
        },
        {
          "field_a_term": "Condorcet winner (beats all others in pairwise majority)",
          "field_b_term": "Median voter outcome under single-peaked preferences (Black's theorem)",
          "note": "Exists if and only if preferences are single-peaked on a line"
        },
        {
          "field_a_term": "Gibbard-Satterthwaite manipulability",
          "field_b_term": "Strategic voting (e.g. voting for a lesser evil in plurality voting)",
          "note": "Every non-dictatorial voting rule has some preference profile where strategic voting helps"
        },
        {
          "field_a_term": "Social welfare function (Arrow's framework)",
          "field_b_term": "Voting system (aggregating individual rankings to social ranking)",
          "note": "Arrow formalises all voting systems as social welfare functions"
        },
        {
          "field_a_term": "Single-peaked preferences on a line (Black's domain restriction)",
          "field_b_term": "Spatial voting model (left-right political spectrum)",
          "note": "Single-peakedness is empirically plausible for policy issues on one dimension"
        }
      ],
      "references": [
        {
          "note": "Arrow (1951) Social Choice and Individual Values, Wiley — proof of impossibility theorem; 2nd edition (1963) adds additional discussion of conditions\n"
        },
        {
          "doi": "10.2307/1914085",
          "note": "Gibbard (1973) Manipulation of voting schemes: a general result, Econometrica 41:587 — proves any non-dictatorial voting rule is manipulable\n"
        },
        {
          "doi": "10.2307/1961957",
          "note": "Black (1948) On the rationale of group decision-making, J Polit Econ 56:23 — median voter theorem under single-peaked preferences\n"
        },
        {
          "note": "Saari (2008) Disposing Dictators, Demystifying Voting Paradoxes, Cambridge — geometric analysis of voting paradoxes using symmetry groups\n"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-mathematics/b-voting-theory-social-choice.yaml"
    },
    {
      "id": "b-wisdom-of-crowds-condorcet",
      "title": "Crowd accuracy on estimation tasks follows the Condorcet jury theorem: aggregate error decreases as 1/√N for independent unbiased estimates, connecting collective intelligence to probability theory",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Condorcet jury theorem (1785) states: if N voters each independently choose the correct answer with probability p > 0.5, then the probability that the majority votes correctly approaches 1 as N→∞. For continuous estimation tasks (e.g., weight of an ox, GDP forecast), if each individual estimate ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-wisdom-of-crowds-condorcet"
      ],
      "communication_gap": "Social scientists who study collective intelligence cite Galton's ox-weighing demonstration (1907) and Surowiecki's popular book but rarely connect to the formal statistical theory (optimal aggregation, correlated estimators). Statisticians who work on combining expert forecasts and prediction intervals have the relevant machinery but publish in statistics journals not in organizational behavior or social psychology.\n",
      "translation_table": [
        {
          "field_a_term": "crowd aggregate estimate x̄ (average of N independent estimates)",
          "field_b_term": "sample mean estimator — MVUE for E[x_i] under iid Gaussian noise",
          "note": "Under CLT, x̄ ~ N(θ, σ²/N) regardless of individual distribution shape"
        },
        {
          "field_a_term": "individual expertise variation (accuracy heterogeneity)",
          "field_b_term": "heterogeneous σ_i² in weighted average — optimal weight w_i ∝ 1/σ_i²",
          "note": "Optimal aggregation weights experts with lower variance more heavily"
        },
        {
          "field_a_term": "social influence / herding (information cascade)",
          "field_b_term": "correlation ρ between ε_i and ε_j — Var(x̄) = σ²[1+(N-1)ρ]/N",
          "note": "With correlation ρ, variance floors at σ²ρ rather than 0 — herding destroys crowd wisdom"
        },
        {
          "field_a_term": "prediction market price (continuous crowd aggregation)",
          "field_b_term": "dynamically updated sufficient statistic for the crowd's collective belief",
          "note": "Arrow securities price equals the probability assigned by the informed crowd"
        }
      ],
      "references": [
        {
          "doi": "10.1038/075450a0",
          "note": "Galton (1907) Vox Populi. Nature 75:450 — original crowd wisdom demonstration (ox weight)"
        },
        {
          "doi": "10.1007/BF00122574",
          "note": "Condorcet (1785) Essay on the Application of Analysis to the Probability of Majority Decisions"
        },
        {
          "doi": "10.1073/pnas.1008636108",
          "note": "Lorenz et al. (2011) How social influence can undermine the wisdom of crowds effect. PNAS 108:9020"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/social-science-mathematics/b-wisdom-of-crowds-condorcet.yaml"
    },
    {
      "id": "b-homophily-assortative-mixing",
      "title": "Social network homophily — the tendency for similar individuals to form ties — is quantified as assortativity mixing in network science, and the configuration model provides a null distribution against which observed homophily can be tested, revealing whether similarity clustering is driven by choice, opportunity, or network structure.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "\"Birds of a feather flock together\" — homophily is one of the most robust findings in social science (McPherson et al. 2001). Network science formalises this as assortativity: the Pearson correlation coefficient r of node attribute values across connected pairs. The configuration model (Newman 2002)",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "",
      "translation_table": [
        {
          "field_a_term": "assortativity coefficient r",
          "field_b_term": "social homophily index",
          "note": "r = 1: perfect homophily; r = 0: random mixing; r = -1: perfect heterophily (bipartite)"
        },
        {
          "field_a_term": "configuration model null",
          "field_b_term": "baseline expected homophily from degree sequence alone",
          "note": "Corrects for the structural tendency of hubs to interconnect"
        },
        {
          "field_a_term": "modularity Q = sum_ij [A_ij - k_i*k_j/(2m)] delta(c_i, c_j)",
          "field_b_term": "within-group tie excess relative to random expectation",
          "note": "Community structure is precisely the network-science operationalisation of social group cohesion"
        },
        {
          "field_a_term": "spreading threshold on assortative network",
          "field_b_term": "social contagion threshold for behavior change",
          "note": "Assortative mixing creates echo chambers that lower the threshold for opinion cascades within communities"
        }
      ],
      "references": [
        {
          "doi": "10.1146/annurev.soc.27.1.415",
          "note": "McPherson et al. (2001) - Birds of a feather: homophily in social networks"
        },
        {
          "doi": "10.1103/PhysRevE.67.026126",
          "note": "Newman (2003) — mixing patterns in networks"
        },
        {
          "doi": "10.1103/PhysRevE.70.056131",
          "note": "Newman (2004) — detecting community structure in networks"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/social-science-network-science/b-homophily-assortative-mixing.yaml"
    },
    {
      "id": "b-homophily-structural-segregation",
      "title": "Homophily and structural segregation — the tendency of similar individuals to connect produces modular networks that are the mathematical basis of filter bubbles and information siloing",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Homophily — the tendency of similar individuals to form ties (\"birds of a feather flock together\") — is the dominant structural force shaping social networks. Measured by the assortativity coefficient r = (Σ_jk jk·e_jk - [Σ_j j·q_j]²) / (σ²_q), where e_jk is the fraction of edges between nodes of ty",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-network-assortativity-predicts-misinformation-spread-rate"
      ],
      "communication_gap": "Sociologists developed homophily theory empirically (McPherson et al.) while network scientists developed assortativity theory mathematically (Newman 2002) — the explicit connection was made only in the mid-2000s. The political science and communication studies literature on filter bubbles rarely uses the assortativity formalism despite it providing quantitative predictions. Pariser's (2011) popular account lacked the mathematical framework, limiting its scientific uptake.\n",
      "translation_table": [
        {
          "field_a_term": "assortativity coefficient r",
          "field_b_term": "Pearson correlation of node degrees across edges",
          "note": "r ∈ [-1,1]; social networks typically r > 0 (assortative); biological networks r < 0"
        },
        {
          "field_a_term": "homophily (preference for similar alters)",
          "field_b_term": "positive attribute assortativity (correlation of node attributes across edges)",
          "note": "Multiple dimensions: race, education, age, opinion — all produce independent assortativity terms"
        },
        {
          "field_a_term": "modularity Q (fraction of edges within groups above random)",
          "field_b_term": "community structure strength",
          "note": "Q > 0.3 typically indicates meaningful community structure; maximized by Louvain algorithm"
        },
        {
          "field_a_term": "echo chamber (within-group information amplification)",
          "field_b_term": "high spectral gap between within/between community mixing rates",
          "note": "Information propagates fast within group (short path), slow between groups (long bridges)"
        },
        {
          "field_a_term": "filter bubble (outgroup information filtered)",
          "field_b_term": "bottleneck edges (bridges) between network communities",
          "note": "Removing few bridge edges disconnects communities — structural basis of information gatekeeping"
        },
        {
          "field_a_term": "Schelling tipping model (weak preference → strong segregation)",
          "field_b_term": "phase transition in network modularity under local rewiring",
          "note": "Even mild homophily preference drives network to highly segregated state — nonlinear tipping"
        }
      ],
      "references": [
        {
          "doi": "10.1146/annurev.soc.27.1.415",
          "note": "McPherson et al. (2001) Annu Rev Sociol 27:415 — homophily in social networks: review"
        },
        {
          "doi": "10.1103/PhysRevLett.89.208701",
          "note": "Newman (2002) Phys Rev Lett 89:208701 — assortative mixing in networks"
        },
        {
          "note": "Pariser (2011) The Filter Bubble (Penguin Press, ISBN 1594203008) — popular account of algorithmic echo chambers"
        },
        {
          "doi": "10.1086/521848",
          "note": "Centola & Macy (2007) Am J Sociol 113:702 — complex contagions and the weakness of long ties"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-network-science/b-homophily-structural-segregation.yaml"
    },
    {
      "id": "b-social-capital-network-centrality",
      "title": "Bourdieu's social capital — resources available through social networks — maps precisely onto network centrality measures: betweenness centrality captures brokerage capital (Burt's structural holes), eigenvector centrality captures prestige capital, and the Gini coefficient of the degree distribution measures inequality in social capital access.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Bourdieu (1986) defined social capital as \"the aggregate of the actual or potential resources which are linked to possession of a durable network of more or less institutionalized relationships of mutual acquaintance and recognition.\" Network science provides precise mathematical operationalisations",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-structural-holes-income-mobility-mediation"
      ],
      "communication_gap": "Sociologists who use Bourdieu's social capital framework rarely quantify networks mathematically; network scientists rarely engage with sociological theory. Surveys measuring social capital (e.g., General Social Survey name-generators) produce data that could be analysed with full network centrality tools but are typically summarised only as degree counts. The full connection between sociology's theoretical concepts and network science's mathematical toolkit was made explicit by Burt (1992) but remains incompletely integrated in either discipline's training.\n",
      "translation_table": [
        {
          "field_a_term": "social capital (Bourdieu)",
          "field_b_term": "network centrality measures (betweenness, eigenvector, clustering)",
          "note": "Different types of social capital correspond to different centrality measures"
        },
        {
          "field_a_term": "brokerage capital / structural holes (Burt)",
          "field_b_term": "betweenness centrality / low constraint index",
          "note": "A broker between disconnected groups has high betweenness and low constraint"
        },
        {
          "field_a_term": "prestige capital",
          "field_b_term": "eigenvector centrality / PageRank",
          "note": "Prestige comes from being connected to prestigious others — the defining property of eigenvector centrality"
        },
        {
          "field_a_term": "bonding capital (Coleman — tight-knit communities)",
          "field_b_term": "clustering coefficient / network modularity",
          "note": "High clustering coefficient = many triangles = dense local community = bonding capital"
        },
        {
          "field_a_term": "social stratification / inequality",
          "field_b_term": "Gini coefficient of degree distribution",
          "note": "Scale-free networks have highly unequal degree distributions mirroring social capital inequality"
        }
      ],
      "references": [
        {
          "note": "Bourdieu (1986) The forms of capital. In Handbook of Theory and Research for the Sociology of Education. Greenwood Press. -- social capital theory"
        },
        {
          "note": "Burt (1992) Structural Holes. Harvard University Press. -- structural holes and brokerage capital"
        },
        {
          "doi": "10.2307/3033543",
          "note": "Freeman, L.C. (1977). A set of measures of centrality based on betweenness. Sociometry 40:35–41."
        },
        {
          "doi": "10.1038/30918",
          "note": "Watts, D.J. & Strogatz, S.H. (1998). Collective dynamics of 'small-world' networks. Nature 393:440–442."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-network-science/b-social-capital-network-centrality.yaml"
    },
    {
      "id": "b-cascade-failures-interdependent-networks",
      "title": "Interdependent network theory (Buldyrev et al. 2010) shows that mutual dependencies between coupled infrastructure networks (power grid ↔ communication network) convert continuous second-order percolation transitions into abrupt first-order cascades, with direct application to the 2003 Italy blackout and financial systemic risk.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Standard percolation theory predicts that as nodes fail in a random network, the giant connected component shrinks continuously (second-order phase transition) with a critical threshold p_c = 1/<k> for Erdős-Rényi graphs. Buldyrev et al. (2010) showed that when two networks are interdependent — each",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-interdependency-drives-first-order-failure"
      ],
      "communication_gap": "Network physicists publish in Physical Review Letters and Nature Physics; infrastructure engineers and policymakers read IEEE Transactions on Power Systems and IEEE Systems Man Cybernetics. The mathematical formalism of percolation theory is rarely taught in engineering programs. Post-event analyses of major blackouts (Italy 2003, Texas 2021) are conducted by engineering commissions that do not cite the physics literature.\n",
      "translation_table": [
        {
          "field_a_term": "percolation threshold p_c (fraction of nodes that can fail before network collapse)",
          "field_b_term": "systemic risk threshold in infrastructure or financial networks",
          "note": "lower p_c = more fragile; interdependency dramatically lowers p_c"
        },
        {
          "field_a_term": "second-order percolation transition (continuous collapse)",
          "field_b_term": "gradual degradation of a single isolated network"
        },
        {
          "field_a_term": "first-order percolation transition (abrupt collapse)",
          "field_b_term": "catastrophic cascade failure in interdependent systems",
          "note": "the key new result — interdependency converts gradual to abrupt failure"
        },
        {
          "field_a_term": "giant connected component (GCC)",
          "field_b_term": "functional core of an infrastructure or financial system"
        },
        {
          "field_a_term": "interdependency links (between-network edges)",
          "field_b_term": "operational dependencies between infrastructure sectors",
          "note": "e.g., power grid nodes need communication; communication nodes need power"
        },
        {
          "field_a_term": "degree of interdependency q (fraction of interdependent nodes)",
          "field_b_term": "coupling strength between infrastructure sectors"
        }
      ],
      "references": [
        {
          "doi": "10.1038/nature08932",
          "note": "Buldyrev et al. (2010) — Catastrophic cascade of failures in interdependent networks; Nature 464:1025"
        },
        {
          "doi": "10.1038/nphys2180",
          "note": "Gao et al. (2012) — Networks formed from interdependent networks; Nat Phys 8:40"
        },
        {
          "doi": "10.1038/464984a",
          "note": "Vespignani (2010) — The fragility of interdependency; Nature 464:984"
        },
        {
          "doi": "10.1038/nphys2727",
          "note": "Bashan et al. (2013) — The extreme vulnerability of interdependent spatially embedded networks; Nat Phys 9:667"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-physics/b-cascade-failures-interdependent-networks.yaml"
    },
    {
      "id": "b-complexity-economics-far-equilibrium",
      "title": "Complexity economics treats markets as far-from-equilibrium dissipative systems driven by inductive agent strategies — the El Farol minority game, Schumpeterian creative destruction, and QWERTY path dependence all emerge from the same positive-feedback and self-organised criticality physics that governs phase transitions.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Standard economics assumes markets reach Walrasian general equilibrium via tatonnement — a price-adjustment process that requires agents to have rational expectations and an auctioneer to coordinate. Complexity economics (Arthur 1994; Santa Fe Institute) abandons both: agents form inductive models (",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-complexity-economics-minority-game-market-ecology"
      ],
      "communication_gap": "Economics departments teach Walrasian equilibrium theory as the foundation; complexity economics and agent-based modelling are treated as heterodox and are rarely included in mainstream PhD curricula. Physicists who know self-organised criticality and far-from-equilibrium thermodynamics do not typically read economic journals; economists do not read physics journals. The Santa Fe Institute cross-disciplinary programme has partly bridged this gap, but the synthesis has not entered mainstream economics teaching.\n",
      "translation_table": [
        {
          "field_a_term": "logistic equation dN/dt = rN(1-N/K)",
          "field_b_term": "technology S-curve adoption (market penetration over time)",
          "note": "Same equation governs bacterial growth, predator-prey, epidemics, and technology diffusion"
        },
        {
          "field_a_term": "phase transition with symmetry breaking",
          "field_b_term": "path-dependent technology lock-in (positive feedback → single attractor)",
          "note": "QWERTY lock-in is analogous to a ferromagnet below Tc: many equivalent states but system freezes in one"
        },
        {
          "field_a_term": "self-organised criticality (SOC)",
          "field_b_term": "market ecology of strategies near El Farol equilibrium",
          "note": "Agent strategy space self-organises to a critical state where predictability is maximally exploited"
        },
        {
          "field_a_term": "dissipative structure (Prigogine) far from thermodynamic equilibrium",
          "field_b_term": "evolving market with continuous Schumpeterian creative destruction",
          "note": "Both require constant energy/resource flux to maintain their far-from-equilibrium organised state"
        },
        {
          "field_a_term": "minority game (anti-coordination)",
          "field_b_term": "El Farol bar problem (go only if you predict fewer than 60% will attend)",
          "note": "Minority games have been solved analytically; their agent-strategy phase diagram matches market microstructure stylised facts"
        }
      ],
      "references": [
        {
          "note": "Arthur, W.B. (1994) Increasing Returns and Path Dependence in the Economy. University of Michigan Press."
        },
        {
          "doi": "10.2307/2234208",
          "note": "Arthur, W.B. (1989) Econ J 99:116 — competing technologies, increasing returns, and lock-in by historical events"
        },
        {
          "note": "Schumpeter, J.A. (1942) Capitalism, Socialism and Democracy. Harper & Brothers."
        },
        {
          "note": "Foster, J. & Metcalfe, J.S. (eds.) (2001) Frontiers of Evolutionary Economics. Edward Elgar."
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-physics/b-complexity-economics-far-equilibrium.yaml"
    },
    {
      "id": "b-complexity-emergence-social-systems",
      "title": "Complexity and Emergence in Social Systems — self-organised criticality, power laws, and the edge of chaos describe cities, economies, and civilisations as complex adaptive systems",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Cities, economies, and civilisations exhibit emergent order arising from local interactions without central control — hallmarks of complex adaptive systems (CAS). The edge of chaos (Kauffman 1993; Langton 1990): complex systems are maximally adaptive and innovative near the phase transition between ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "The physics of complex systems community (largely connected to the Santa Fe Institute and the Journal of Statistical Mechanics) produces rigorous models of social phenomena, but mainstream social scientists remain sceptical of physics-derived analogies to human behaviour. The key objection — that human agents have intentionality, culture, and institutions absent from physical systems — is valid but should motivate hybrid models rather than disciplinary insularity. The most productive bridges have come from economists willing to engage complexity science (Arthur, Farmer, Haldane) and physicists willing to engage social theory.\n",
      "translation_table": [
        {
          "field_a_term": "self-organised criticality (SOC)",
          "field_b_term": "spontaneous drift of a driven social/economic system to a critical state",
          "note": "Bak sandpile: no parameter tuning needed; power-law event distribution is the attractor"
        },
        {
          "field_a_term": "power law P(x) ∝ x^{−α} (Zipf, Pareto)",
          "field_b_term": "scale-free size distribution of cities, firms, wealth, or social networks",
          "note": "Lack of a characteristic scale implies the same dynamics govern small and large events — policy interventions must account for tail risk"
        },
        {
          "field_a_term": "edge of chaos (phase transition between order and chaos)",
          "field_b_term": "optimal operating regime for innovation and adaptability in organisations",
          "note": "Too much order → rigid; too much chaos → incoherent; the critical point maximises information integration"
        },
        {
          "field_a_term": "path dependence (increasing returns, lock-in)",
          "field_b_term": "historical contingency in technology and institutional adoption",
          "note": "Network externalities and switching costs can maintain suboptimal standards; policy must overcome basin of attraction"
        },
        {
          "field_a_term": "agent-based model (ABM) with heterogeneous interacting agents",
          "field_b_term": "simulation of emergent social phenomena from micro-level decision rules",
          "note": "Santa Fe Institute economics: ABMs reveal emergence invisible to representative-agent DSGE models"
        },
        {
          "field_a_term": "scale-free network (preferential attachment)",
          "field_b_term": "rich-get-richer dynamics in wealth, citations, and social influence",
          "note": "Barabási–Albert (1999) model: new nodes attach preferentially to high-degree nodes → power-law degree distribution"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.59.381",
          "note": "Bak, Tang & Wiesenfeld (1987) Phys Rev Lett 59:381 — self-organised criticality"
        },
        {
          "note": "Kauffman (1993) The Origins of Order — fitness landscapes and the edge of chaos"
        },
        {
          "note": "Zipf (1949) Human Behavior and the Principle of Least Effort — rank-frequency laws"
        },
        {
          "doi": "10.1126/science.284.5411.107",
          "note": "Arthur (1999) Science 284:107 — complexity and the economy"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-physics/b-complexity-emergence-social-systems.yaml"
    },
    {
      "id": "b-econophysics-wealth-distribution",
      "title": "Pareto's power-law wealth distribution P(w>x) ∝ x^{-α} (α≈1.5) emerges from Bouchaud-Mézard multiplicative noise models analogous to Boltzmann-Gibbs statistics, while Piketty's r>g inequality reproduces the physicist's condition for unbounded variance growth in a multiplicative stochastic process.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Pareto (1897) observed empirically that wealth w follows a power-law complementary CDF: P(w>x) ∝ x^{-α}, with α ≈ 1.5–2.0 for most countries (Pareto index). The richest 20% hold ~80% of wealth (80/20 rule). Econophysics provides a mechanistic derivation: treat agents as molecules exchanging \"wealth ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-multiplicative-noise-pareto-exponent-capital-tax-rate"
      ],
      "communication_gap": "Econophysics papers appear in physics journals (Physica A, Eur Phys J B) and are largely ignored by mainstream economics journals (AER, JPE, QJE), which require microfoundation models with rational agents and utility maximization. Economists distrust the \"gas molecule\" analogy as too simplistic; physicists find the equilibrium and rationality assumptions of economics equally implausible. Piketty's Capital (2014) was an economics bestseller but did not cite the Bouchaud-Mézard multiplicative noise model that independently derived the same r>g condition from statistical mechanics — a significant missed connection.\n",
      "translation_table": [
        {
          "field_a_term": "energy in Boltzmann-Gibbs statistics (exponential distribution)",
          "field_b_term": "labor income in random additive exchange models"
        },
        {
          "field_a_term": "multiplicative noise (geometric Brownian motion) in stochastic dynamics",
          "field_b_term": "capital returns r_i w_i (return proportional to existing wealth)"
        },
        {
          "field_a_term": "power-law exponent α of Boltzmann energy tail",
          "field_b_term": "Pareto index α of wealth distribution tail"
        },
        {
          "field_a_term": "variance divergence condition σ² > ρ (multiplicative noise overwhelms drift)",
          "field_b_term": "Piketty r > g (capital return exceeds economic growth)"
        },
        {
          "field_a_term": "Gini coefficient G = 1/(2α−1) from Pareto distribution",
          "field_b_term": "inequality index measurable in national income surveys"
        },
        {
          "field_a_term": "predator-prey Lotka-Volterra dynamics",
          "field_b_term": "wealth redistribution through taxation (predation on capital)"
        }
      ],
      "references": [
        {
          "note": "Pareto (1897) Cours d'économie politique — original Pareto distribution"
        },
        {
          "doi": "10.1016/S0378-4371(00)00205-3",
          "note": "Bouchaud & Mézard (2000) Physica A 282:536 — wealth condensation in a simple model"
        },
        {
          "doi": "10.1103/RevModPhys.81.1703",
          "note": "Yakovenko & Rosser (2009) Rev Mod Phys 81:1703 — colloquium on statistical mechanics of money"
        },
        {
          "note": "Piketty (2014) Capital in the Twenty-First Century — Harvard University Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-physics/b-econophysics-wealth-distribution.yaml"
    },
    {
      "id": "b-opinion-dynamics-ising",
      "title": "Opinion dynamics models (Voter, Sznajd, Deffuant) are instances of Ising-like spin dynamics on social networks: political polarisation is a ferromagnetic phase transition, echo chambers are ferromagnetic domains, and the critical temperature T_c predicts the consensus-to-fragmentation transition.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "The Ising model describes interacting binary spins σ_i ∈ {-1, +1} on a lattice with Hamiltonian H = -J Σ_{ij} σ_i σ_j - h Σ_i σ_i. The ferromagnetic phase transition at T_c separates two phases: - T < T_c (ordered, ferromagnetic) -- spins align → consensus / polarisation - T > T_c (disordered, param",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-social-ising-polarization-transition",
        "h-polarisation-ising-phase-transition",
        "h-norm-cascade-ising-ew"
      ],
      "communication_gap": "Physicists developed the Ising model for magnetism from 1920 onwards. Sociologists and political scientists developed opinion dynamics models independently in the 1990s-2000s, largely unaware of the exact correspondence with statistical mechanics. Castellano et al.'s 2009 Reviews of Modern Physics article made the connection explicit and accessible, but adoption in political science remains limited because the language of phase transitions, order parameters, and universality classes is not standard in social science training.\n",
      "translation_table": [
        {
          "field_a_term": "Ising spin σ_i ∈ {-1, +1}",
          "field_b_term": "Binary political opinion o_i (e.g., liberal/conservative)",
          "note": "Discrete opinion maps exactly to spin; continuous opinion requires XY or Heisenberg generalization"
        },
        {
          "field_a_term": "Ferromagnetic coupling J_{ij} > 0",
          "field_b_term": "Social influence between agents i and j",
          "note": "Homophily increases effective J; heterophily decreases it"
        },
        {
          "field_a_term": "External magnetic field h",
          "field_b_term": "Media influence, institutional framing, propaganda",
          "note": "Biases all agents toward a particular opinion direction"
        },
        {
          "field_a_term": "Temperature T (= inverse social conformity pressure)",
          "field_b_term": "Individual-level independence / contrarianism",
          "note": "High T = individuals resist social influence; low T = strong conformity"
        },
        {
          "field_a_term": "Ferromagnetic phase transition at T_c",
          "field_b_term": "Polarisation transition at critical homophily threshold",
          "note": "Sharp transition from pluralism to polarisation as T drops below T_c"
        },
        {
          "field_a_term": "Order parameter m = ⟨σ⟩ (magnetisation)",
          "field_b_term": "Mean opinion / degree of societal polarisation",
          "note": "m = 0 is pluralism; |m| → 1 is full polarisation"
        },
        {
          "field_a_term": "Ferromagnetic domain",
          "field_b_term": "Echo chamber / partisan filter bubble",
          "note": "Spatial region of aligned spins / densely connected community of like-minded agents"
        },
        {
          "field_a_term": "Domain wall (interface between domains)",
          "field_b_term": "Cross-partisan bridge tie in social network",
          "note": "High-energy configuration; destroyed by increasing conformity (decreasing T)"
        },
        {
          "field_a_term": "Correlation length ξ diverging at T_c",
          "field_b_term": "Spatial range of opinion clusters diverging at polarisation transition",
          "note": "Near the transition, opinion fluctuations are correlated across the whole society"
        }
      ],
      "references": [
        {
          "doi": "10.1142/S0129183100000936",
          "note": "Sznajd-Weron & Sznajd (2000) Int J Mod Phys C 11:1157 — Sznajd model of opinion dynamics"
        },
        {
          "doi": "10.1142/S0219525900000078",
          "note": "Deffuant et al. (2000) Advances in Complex Systems 3:87 — bounded-confidence model"
        },
        {
          "doi": "10.1103/RevModPhys.81.591",
          "note": "Castellano, Fortunato & Loreto (2009) Rev Mod Phys 81:591 — comprehensive review of statistical physics of social dynamics"
        },
        {
          "doi": "10.1140/epjb/e2002-00359-2",
          "note": "Galam (2002) Eur Phys J B 25:403 — minority opinion spreading and Ising analogy"
        }
      ],
      "last_reviewed": "2026-05-05",
      "file": "cross-domain/social-science-physics/b-opinion-dynamics-ising.yaml"
    },
    {
      "id": "b-schelling-ising-dynamics",
      "title": "Schelling's residential segregation model is formally equivalent to a ferromagnetic Ising model at finite temperature — Glauber dynamics at tolerance T produces the Ising phase diagram, and segregation emerges as a magnetic ordering transition even with mild preferences.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Schelling's segregation model (1971): agents of two types (red/blue) on a grid are \"satisfied\" when at least fraction τ of their neighbors are the same type; unsatisfied agents move to a random empty location. Result: even with τ < 0.5 (mild preference — most agents prefer a mixed neighborhood), the",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-urban-segregation-ising-phase-transition-test"
      ],
      "communication_gap": "Schelling (1971) was an economist using spatial thought experiments. The Ising equivalence was demonstrated in 2006 by Vinković (physicist) and Kirman (economist) in PNAS — a rare economics-physics collaboration. Most sociology and urban planning literature on residential segregation still treats Schelling as a game-theoretic or social-dynamic model without invoking the Ising framework that provides its exact analytical solution and universal predictions.\n",
      "translation_table": [
        {
          "field_a_term": "Ising spin s_i = ±1",
          "field_b_term": "Schelling agent type (red = +1, blue = −1)"
        },
        {
          "field_a_term": "ferromagnetic coupling J > 0",
          "field_b_term": "preference for same-type neighbors"
        },
        {
          "field_a_term": "temperature T = 1/β",
          "field_b_term": "inverse tolerance (1/τ) — how strongly agents prefer same-type neighbors"
        },
        {
          "field_a_term": "Glauber dynamics (thermal fluctuation-driven spin flips)",
          "field_b_term": "Schelling agent relocation (unsatisfied agents move)"
        },
        {
          "field_a_term": "Ising phase transition at T_c (paramagnetic → ferromagnetic)",
          "field_b_term": "Schelling segregation transition at critical tolerance τ_c"
        },
        {
          "field_a_term": "ordered ferromagnetic phase (aligned domains)",
          "field_b_term": "segregated residential neighborhoods (like-with-like clusters)"
        },
        {
          "field_a_term": "paramagnetic phase (disordered spins, T > T_c)",
          "field_b_term": "integrated mixed neighborhoods (high tolerance regime)"
        },
        {
          "field_a_term": "correlation length ξ ∝ |T − T_c|^{−ν}",
          "field_b_term": "average segregated cluster size near the tolerance transition"
        }
      ],
      "references": [
        {
          "doi": "10.1080/0022250X.1971.9989794",
          "note": "Schelling (1971) Dynamic models of segregation. J Math Sociol 1:143"
        },
        {
          "doi": "10.1073/pnas.0600362103",
          "note": "Vinković & Kirman (2006) A physical analogue of the Schelling model. PNAS 103:19261"
        },
        {
          "note": "Mobilia & Georgiev (2005) Ising model equivalence of Schelling. Phys Rev E 71:046102"
        },
        {
          "doi": "10.1103/RevModPhys.81.591",
          "note": "Castellano et al. (2009) Statistical physics of social dynamics. Rev Mod Phys 81:591"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-physics/b-schelling-ising-dynamics.yaml"
    },
    {
      "id": "b-social-stratification-statistical-mechanics",
      "title": "Social stratification and wealth inequality follow statistical mechanics distributions (Boltzmann-Gibbs for the bulk, Pareto for the tail), mapping economic exchange to two-body energy exchange and the Gini coefficient to a thermodynamic entropy measure.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "In models where agents exchange fixed amounts of wealth in random pairwise transactions, the equilibrium wealth distribution converges to a Boltzmann-Gibbs exponential P(w) ~ exp(-w/T) (where T is average wealth per capita, playing the role of temperature); adding a savings propensity lambda produce",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-wealth-distribution-boltzmann-savings-propensity"
      ],
      "communication_gap": "Sociologists and economists study inequality using survey data and distributional statistics while statistical physicists develop kinetic exchange models; the econophysics approach (Dragulescu-Yakovenko 2000) has been criticized by economists for unrealistic micro-assumptions, and the productive exchange of modeling techniques between the communities remains limited.\n",
      "translation_table": [
        {
          "field_a_term": "wealth distribution P(w) (sociology/economics)",
          "field_b_term": "energy distribution in a statistical ensemble (statistical physics)",
          "note": "Wealth plays the role of energy; GDP per capita corresponds to temperature"
        },
        {
          "field_a_term": "pairwise economic exchange (economics)",
          "field_b_term": "two-body elastic collision with energy exchange (physics)",
          "note": "Random trade between agents conserves total wealth analogous to elastic collision conserving kinetic energy"
        },
        {
          "field_a_term": "Gini coefficient G (sociology)",
          "field_b_term": "normalized deviation from maximum entropy distribution (statistical physics)",
          "note": "G = 0 corresponds to maximum entropy (uniform distribution); G = 1 to minimum entropy (all wealth in one agent)"
        },
        {
          "field_a_term": "Pareto power-law tail in wealth (economics)",
          "field_b_term": "heavy tail from multiplicative noise / Kesten process (statistical physics)",
          "note": "Returns-on-wealth (investment returns proportional to wealth) create multiplicative noise generating Pareto tails"
        }
      ],
      "references": [
        {
          "doi": "10.1140/epjb/e2001-00192-1",
          "note": "Dragulescu & Yakovenko (2001) - evidence for Boltzmann distribution of money"
        },
        {
          "doi": "10.1016/j.physa.2003.10.024",
          "note": "Chatterjee & Chakrabarti (2004) - kinetic exchange models for income distributions"
        },
        {
          "doi": "10.48550/arXiv.cond-mat/0010110",
          "note": "Bouchaud & Mezard (2000) - wealth condensation in a simple model of economy"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/social-science-physics/b-social-stratification-statistical-mechanics.yaml"
    },
    {
      "id": "b-sociophysics-cultural-dynamics",
      "title": "Axelrod's cultural dissemination model bridges social science and physics: a phase transition at critical q/F ratio separates monoculture from frozen multicultural states — explaining why global communication has not eliminated cultural diversity, and predicting language death rates matching Zipf power-law observations.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Axelrod's (1997) cultural dissemination model shows that local interaction can sustain global diversity. Agents have F cultural features, each with q traits. Interaction probability between two agents = fraction of shared features (cultural overlap). When two agents interact, one randomly chosen fea",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-cultural-phase-transition-globalization-diversity-paradox"
      ],
      "communication_gap": "Social scientists who study cultural diversity and globalization rarely use statistical physics models or engage with the mathematical theory of phase transitions. Physicists who develop sociophysics models often do not engage with the empirical social science literature on cultural change. The Axelrod model is widely cited in complexity science but rarely in anthropology or cultural studies, where the empirical data needed to validate it is held.\n",
      "translation_table": [
        {
          "field_a_term": "number of cultural features F",
          "field_b_term": "dimensionality of cultural space; lattice dimension analog",
          "note": "F controls the richness of the cultural representation space"
        },
        {
          "field_a_term": "number of traits per feature q",
          "field_b_term": "alphabet size; controls cross-cultural similarity probability",
          "note": "q/F ratio is the control parameter for the cultural phase transition"
        },
        {
          "field_a_term": "interaction probability (cultural overlap)",
          "field_b_term": "Hamming-distance-based interaction kernel (social physics)",
          "note": "only culturally similar agents interact — homophily is built into the dynamics"
        },
        {
          "field_a_term": "frozen multicultural state (q/F > q_c/F)",
          "field_b_term": "absorbing state with multiple cultural domains",
          "note": "mathematically equivalent to a spin glass ground state with many metastable configurations"
        },
        {
          "field_a_term": "language extinction rate (~1 language every 2 weeks globally)",
          "field_b_term": "decay of minority in Abrams-Strogatz model (power-law extinction curve)",
          "note": "Zipf distribution of language sizes suggests self-organized criticality in language evolution"
        }
      ],
      "references": [
        {
          "doi": "10.1177/0022002797041002006",
          "note": "Axelrod (1997) The dissemination of culture — a model with local convergence and global polarization; J Conflict Resolut 41:203"
        },
        {
          "doi": "10.1103/RevModPhys.81.591",
          "note": "Castellano et al. (2009) Statistical physics of social dynamics; Rev Mod Phys 81:591"
        },
        {
          "doi": "10.1038/424900a",
          "note": "Abrams & Strogatz (2003) Modelling the dynamics of language death; Nature 424:900"
        },
        {
          "note": "Stauffer & Sahimi (2007) Discrete simulation of the dynamics of spread of extreme opinions in a society; Physica A 374:835"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/social-science-physics/b-sociophysics-cultural-dynamics.yaml"
    },
    {
      "id": "b-traffic-flow-fluid-dynamics",
      "title": "Vehicular traffic flow obeys fluid-dynamic conservation laws: the LWR model maps vehicle density to fluid density and velocity to flow velocity, traffic jams propagate as shock waves satisfying the Rankine-Hugoniot condition, and phantom traffic jams arise from the same Turing-like linear instability that creates stop-and-go waves in supply chains, pedestrian crowds, and ant trails.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Vehicular traffic flow obeys fluid-dynamic conservation laws. The LWR model: d(rho)/dt + d(rho×v)/dx = 0 (conservation of vehicles) with a fundamental diagram v(rho) relating velocity to density. Traffic jams = shock waves propagating backward at wave speed c = dq/drho (Rankine-Hugoniot condition fo",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-traffic-flow-turing-instability-stop-go"
      ],
      "communication_gap": "Lighthill & Whitham (1955) were fluid dynamicists applying their expertise to a social science problem. Traffic engineering developed largely independently in civil engineering departments, rediscovering many fluid mechanics results. The nonlinear dynamics community (Helbing, Nagel) reconnected traffic science to physics in the 1990s but many traffic engineers still use empirical models without awareness of the fluid mechanics foundation.\n",
      "translation_table": [
        {
          "field_a_term": "vehicle density rho (vehicles/km)",
          "field_b_term": "fluid density rho (kg/m^3)",
          "note": "conservation of vehicles maps exactly to conservation of mass"
        },
        {
          "field_a_term": "traffic flow q = rho × v (vehicles/hour)",
          "field_b_term": "fluid flux J = rho × v (kg/m^2/s)",
          "note": "identical conservation law d(rho)/dt + d(q)/dx = 0"
        },
        {
          "field_a_term": "traffic jam (stationary high-density region)",
          "field_b_term": "shock wave (discontinuity in density/velocity)",
          "note": "Rankine-Hugoniot: jam propagates backward at speed c = (q2-q1)/(rho2-rho1)"
        },
        {
          "field_a_term": "free-flow to congested phase transition",
          "field_b_term": "supersonic to subsonic flow transition (analogous to car entering jam)",
          "note": "critical density rho_c analogous to sonic point; LWR is a first-order conservation law"
        },
        {
          "field_a_term": "driver reaction time tau (1-2 seconds)",
          "field_b_term": "relaxation time in second-order traffic model (viscosity analog)",
          "note": "tau determines instability growth rate and stop-and-go wave formation threshold"
        }
      ],
      "references": [
        {
          "doi": "10.1098/rspa.1955.0089",
          "note": "Lighthill & Whitham (1955) Proc R Soc A 229:317 — kinematic wave theory"
        },
        {
          "doi": "10.1287/opre.4.1.42",
          "note": "Richards (1956) Oper Res 4:42 — shock waves on highways"
        },
        {
          "note": "Kerner & Konhauser (1993) Phys Rev E 48:R2335 — three-phase traffic theory"
        },
        {
          "doi": "10.1103/RevModPhys.73.1067",
          "note": "Helbing (2001) Rev Mod Phys 73:1067 — traffic and related self-driven particle systems"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-physics/b-traffic-flow-fluid-dynamics.yaml"
    },
    {
      "id": "b-survey-causal-inference",
      "title": "The potential outcomes framework (Rubin) and Pearl's do-calculus provide the statistical foundations for causal inference from survey and observational data, connecting survey methodology to formal causal graph theory",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The potential outcomes framework (Rubin 1974): each unit has potential outcomes Y(1) under treatment and Y(0) under control; the causal effect = Y(1) - Y(0), but only one is observed (the fundamental problem of causal inference). Randomised controlled trials (RCTs) solve this by making treatment ind",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-regression-discontinuity-surveys-local-average-treatment-effect-equivalence"
      ],
      "communication_gap": "Survey statisticians and causal inference researchers developed parallel frameworks with different vocabularies (design-based vs. model-based; potential outcomes vs. DAGs). Rubin and Pearl rarely cross-cited despite solving related problems. Applied social scientists often use one framework without knowing its connection to the other.\n",
      "translation_table": [
        {
          "field_a_term": "potential outcome Y(t)",
          "field_b_term": "counterfactual / do-operator do(T=t)",
          "note": "Both formalise what would happen under an intervention not actually observed"
        },
        {
          "field_a_term": "assignment mechanism (SUTVA, ignorability)",
          "field_b_term": "back-door criterion (no unblocked confounding path)",
          "note": "Both conditions ensure that observed association equals causal effect"
        },
        {
          "field_a_term": "propensity score e(X) = P(T=1|X)",
          "field_b_term": "inverse probability weighting (IPW)",
          "note": "Propensity scores reweight the sample to approximate a randomised experiment"
        },
        {
          "field_a_term": "instrumental variable Z (exclusion, relevance, independence)",
          "field_b_term": "front-door criterion in DAG",
          "note": "Both identify causal effect when direct randomisation is impossible"
        }
      ],
      "references": [
        {
          "note": "Rubin (1974) — potential outcomes framework for causal inference",
          "doi": "10.1037/h0037350"
        },
        {
          "note": "Pearl (2000) Causality — do-calculus and DAG-based causal inference"
        },
        {
          "note": "Angrist & Pischke (2009) Mostly Harmless Econometrics — IV and RD methods"
        },
        {
          "note": "Imbens & Rubin (2015) Causal Inference for Statistics, Social, and Biomedical Sciences"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/social-science-statistics/b-survey-causal-inference.yaml"
    },
    {
      "id": "b-liquid-crystals-frank-elasticity",
      "title": "Liquid crystal orientational order is described by the Frank elastic free energy functional F=∫[K1(∇·n̂)²+K2(n̂·∇×n̂)²+K3(n̂×∇×n̂)²]dV, which maps onto the Landau theory with a vector order parameter",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "In a liquid crystal, rod-shaped molecules locally align along a director field n̂(r) (unit vector). The Frank-Oseen elastic free energy density penalizes deformations: f_el = (K₁/2)(∇·n̂)² + (K₂/2)(n̂·∇×n̂)² + (K₃/2)|n̂×(∇×n̂)|², where K₁, K₂, K₃ are the splay, twist, and bend elastic constants (uni",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-liquid-crystals-frank-elasticity"
      ],
      "communication_gap": "Liquid crystal physicists who work with the Frank free energy rarely connect to the Landau-Ginzburg field theory literature in condensed matter physics, where the same mathematics appears for superconductors (Ginzburg-Landau), magnets (Heisenberg model), and other order-parameter systems. The topological defect analysis (homotopy groups) is standard in high-energy physics for cosmic strings but non-standard in soft matter despite the direct mathematical analogy.\n",
      "translation_table": [
        {
          "field_a_term": "liquid crystal director field n̂(r)",
          "field_b_term": "order parameter field in Landau-Ginzburg theory",
          "note": "n̂ is a headless unit vector (n̂ = -n̂); the order parameter space is RP² (projective plane)"
        },
        {
          "field_a_term": "Frank elastic constants K₁, K₂, K₃ (pN)",
          "field_b_term": "gradient-energy coefficients in the Landau-Ginzburg free energy expansion",
          "note": "Typical values: K₁~K₃~10 pN, K₂~5 pN for 5CB liquid crystal"
        },
        {
          "field_a_term": "Freedericksz transition critical field B_c",
          "field_b_term": "critical point of the second-order transition in the field-temperature phase diagram",
          "note": "B_c ∝ √K/d; used to measure elastic constants from optical birefringence"
        },
        {
          "field_a_term": "topological defects (disclinations, ±1/2 and ±1 strength)",
          "field_b_term": "topological solitons characterized by homotopy groups π₁(RP²)=Z₂",
          "note": "Only half-integer disclinations are topologically stable in nematics; integer ones can escape"
        }
      ],
      "references": [
        {
          "doi": "10.1039/tf9585400046",
          "note": "Frank (1958) On the theory of liquid crystals. Disc Faraday Soc 25:19 — original Frank elastic theory"
        },
        {
          "doi": "10.1103/PhysRevLett.99.157801",
          "note": "Lavrentovich (2007) Topological defects in liquid crystals — recent trends and new horizons. Liq Cryst 34:851"
        },
        {
          "doi": "10.1103/RevModPhys.85.1143",
          "note": "Marchetti et al. (2013) Hydrodynamics of soft active matter. Rev Mod Phys 85:1143"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/soft-matter-physics/b-liquid-crystals-frank-elasticity.yaml"
    },
    {
      "id": "b-granular-matter-jamming-transition",
      "title": "Dense granular materials undergo a jamming transition from fluid-like to solid-like behaviour analogous to a second-order phase transition in statistical physics: at packing fraction phi_c ~ 0.64 (random close packing) the contact network percolates, diverging length and time scales appear, and the system's response maps onto the critical phenomena universality class of mean-field percolation",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "As a granular packing is compressed above the jamming point phi_J, the excess contact number Z - Z_c ~ (phi - phi_J)^0.5 and the shear modulus G ~ (phi - phi_J)^0.5 diverge with the same power-law exponents predicted by mean-field constraint-counting (Maxwell criterion), while the bulk modulus scale",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-jamming-transition-critical-exponents"
      ],
      "communication_gap": "Engineers studying granular flow focus on practical bulk behavior while condensed matter physicists analyze jamming as an idealized critical phenomenon; the universality class of jamming is still debated (mean-field vs. non-mean-field exponents) and experimental access to the diverging length scale remains difficult.",
      "translation_table": [
        {
          "field_a_term": "packing fraction phi relative to phi_J (soft matter)",
          "field_b_term": "reduced temperature (T - T_c)/T_c near critical point (statistical physics)",
          "note": "Both serve as the control parameter measuring distance from the transition; power laws appear in both limits"
        },
        {
          "field_a_term": "excess coordination number Z - Z_c (soft matter)",
          "field_b_term": "order parameter in a second-order phase transition (statistical physics)",
          "note": "Z - Z_c acts as order parameter; it is zero below jamming and grows as a power law above phi_J"
        },
        {
          "field_a_term": "force chain network in jammed packing (soft matter)",
          "field_b_term": "percolating cluster at percolation threshold (statistical physics)",
          "note": "The jammed solid corresponds to a percolating force-chain network; jamming = contact percolation"
        },
        {
          "field_a_term": "diverging length scale xi of soft modes at jamming (soft matter)",
          "field_b_term": "diverging correlation length near critical point (statistical physics)",
          "note": "Both diverge as |phi - phi_J|^{-nu} signalling cooperative rearrangements at all scales"
        }
      ],
      "references": [
        {
          "doi": "10.1038/23819",
          "note": "Liu & Nagel (1998) - jamming is not just cool any more (Nature perspective introducing jamming phase diagram)"
        },
        {
          "doi": "10.1103/PhysRevE.68.011306",
          "note": "O'Hern et al. (2003) - jamming at zero temperature and zero applied stress: the epitome of disorder"
        },
        {
          "doi": "10.1103/PhysRevLett.95.098301",
          "note": "Silbert, Liu & Nagel (2005) - vibrations and diverging length scales near the unjamming transition"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/soft-matter-statistical-physics/b-granular-matter-jamming-transition.yaml"
    },
    {
      "id": "b-nematic-ordering-maier-saupe-mean-field",
      "title": "Nematic liquid crystal ordering is a mean-field phase transition described by the Maier-Saupe theory: the order parameter S = <P_2(cos theta)> (second Legendre polynomial of orientational angle) undergoes a weakly first-order isotropic-to-nematic transition driven by anisotropic van der Waals interactions, with all thermodynamic properties derivable from the mean-field self-consistency equation.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Maier & Saupe (1958) derived a mean-field theory for the isotropic-nematic (I-N) transition by replacing the interaction of each molecule with all others by an effective field U = -u * S * P_2(cos theta), where u is the interaction strength and S is the nematic order parameter. The self-consistency ",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Soft matter physicists apply Maier-Saupe theory quantitatively but rarely connect it to the broader mean-field universality class literature in statistical physics; statistical physicists studying order-disorder transitions sometimes use Ising/Heisenberg models without engaging with the liquid crystal literature where mean-field theory is exceptionally accurate. The tensor nature of the order parameter adds complexity that inhibits cross-fertilisation.\n",
      "translation_table": [
        {
          "field_a_term": "mean-field self-consistency equation (statistical physics)",
          "field_b_term": "Maier-Saupe equation for nematic order parameter S (soft matter)",
          "note": "S = f(S, T) solved self-consistently; analogous to Curie-Weiss ferromagnetism"
        },
        {
          "field_a_term": "order parameter S = <P_2(cos theta)> (statistical physics)",
          "field_b_term": "degree of molecular alignment along director (soft matter)",
          "note": "S = 0 (isotropic), S = 1 (perfect alignment); S_NI ~ 0.44 at transition"
        },
        {
          "field_a_term": "weakly first-order phase transition (statistical physics)",
          "field_b_term": "discontinuous jump in birefringence at the I-N transition (soft matter)",
          "note": "First-order character predicted by mean field; confirmed by latent heat measurement"
        },
        {
          "field_a_term": "Landau expansion in powers of order parameter (statistical physics)",
          "field_b_term": "de Gennes-Landau theory near T_NI (soft matter)",
          "note": "Free energy F = a(T-T*)S^2 - bS^3 + cS^4; cubic term forces first-order transition"
        }
      ],
      "references": [
        {
          "doi": "10.1515/zna-1958-0902",
          "note": "Maier & Saupe (1958) - eine einfache molekular-statistische theorie der nematischen kristallinflüssigen phase"
        },
        {
          "doi": "10.1080/15421406808082675",
          "note": "de Gennes (1969) - phenomenology of short-range order in the isotropic phase of liquid crystals"
        },
        {
          "doi": "10.1093/acprof:oso/9780198520245.001.0001",
          "note": "Chaikin & Lubensky (1995) - Principles of Condensed Matter Physics; nematic order parameter"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/soft-matter-statistical-physics/b-nematic-ordering-maier-saupe-mean-field.yaml"
    },
    {
      "id": "b-boltzmann-shannon-entropy",
      "title": "Boltzmann's entropy S = k_B ln W and Shannon's entropy H = −Σ p_i log p_i are formally identical — thermodynamic entropy IS the Shannon information entropy of the macroscopic probability distribution over microstates.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Boltzmann's entropy S = k_B ln W (W = number of equally probable microstates) and Shannon's entropy H = −Σ p_i log p_i (probability distribution over messages) are the same mathematical object up to the Boltzmann constant k_B and a factor of ln 2 (bit vs nat). The equivalence S = k_B ln 2 · H holds ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-maxent-nonequilibrium-statistical-mechanics"
      ],
      "communication_gap": "Physics and information theory developed the entropy concept independently (Boltzmann 1877 and Shannon 1948 are 71 years apart). Many physicists learn Boltzmann entropy as a physical fact, not as an inference framework. Many information theorists do not know that H is measured in joules/kelvin when multiplied by k_B. Jaynes's unification is underappreciated outside statistical physics.\n",
      "translation_table": [
        {
          "field_a_term": "Boltzmann entropy S = k_B ln W",
          "field_b_term": "Shannon entropy H = −Σ p_i log p_i (uniform distribution)",
          "note": "S = k_B ln 2 · H; differ only by units (joules/kelvin vs bits)"
        },
        {
          "field_a_term": "number of microstates W consistent with macrostate",
          "field_b_term": "number of distinguishable messages of a given probability",
          "note": "Same combinatorial object — Boltzmann's W is Shannon's code length exponent"
        },
        {
          "field_a_term": "thermodynamic equilibrium (maximum entropy state)",
          "field_b_term": "maximum entropy distribution (MaxEnt principle)",
          "note": "Jaynes showed equilibrium statistical mechanics = MaxEnt inference"
        },
        {
          "field_a_term": "partition function Z = Σ exp(−E_i / k_B T)",
          "field_b_term": "moment generating function of the energy distribution",
          "note": "Z encodes all thermodynamic information as a Laplace transform"
        },
        {
          "field_a_term": "free energy F = −k_B T ln Z",
          "field_b_term": "log partition function (cumulant generating function)",
          "note": "Variational free energy = KL-divergence in variational Bayes"
        }
      ],
      "references": [
        {
          "note": "Boltzmann (1877) Über die Beziehung zwischen dem zweiten Hauptsatze der mechanischen Wärmetheorie. Sitzungsber Akad Wiss Wien 76:373-435"
        },
        {
          "note": "Shannon (1948) A mathematical theory of communication. Bell Syst Tech J 27:379-423"
        },
        {
          "note": "Jaynes (1957) Information theory and statistical mechanics I. Phys Rev 106:620-630"
        },
        {
          "note": "Jaynes (1957) Information theory and statistical mechanics II. Phys Rev 108:171-190"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/statistical-mechanics-information-theory/b-boltzmann-shannon-entropy.yaml"
    },
    {
      "id": "b-stochastic-thermodynamics-fluctuation-theorems",
      "title": "Fluctuation theorems (Crooks, Jarzynski) connect nonequilibrium work distributions to equilibrium free energy differences, bridging stochastic thermodynamics and information theory through the mathematical identity between entropy production and relative entropy (KL divergence).\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The Crooks fluctuation theorem exp(W/kT) = exp(DeltaF/kT) * P_F(W)/P_R(-W) and the Jarzynski equality <exp(-W/kT)> = exp(-DeltaF/kT) establish that entropy production in nonequilibrium processes equals the KL divergence between forward and time-reversed trajectory probability distributions, making i",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-landauer-limit-biological-computation"
      ],
      "communication_gap": "Physicists developing stochastic thermodynamics and information theorists studying thermodynamic limits of computation arrived at identical mathematics through different routes; the Crooks-Jarzynski-Landauer connection is recognized in statistical physics but not widely known in information theory or computer science communities where thermodynamic limits of computation are actively debated.\n",
      "translation_table": [
        {
          "field_a_term": "entropy production sigma (thermodynamics)",
          "field_b_term": "KL divergence D_KL(P_F || P_R) between forward/reverse trajectories",
          "note": "Irreversibility equals the information-theoretic distinguishability of time's arrow"
        },
        {
          "field_a_term": "free energy difference DeltaF (thermodynamics)",
          "field_b_term": "log ratio of partition functions (statistical mechanics / information theory)",
          "note": "Jarzynski equality recovers DeltaF from nonequilibrium work measurements"
        },
        {
          "field_a_term": "Landauer erasure cost kT ln 2 per bit (thermodynamics)",
          "field_b_term": "minimum thermodynamic cost of information erasure (information theory)",
          "note": "Information erasure requires entropy production; connects Maxwell's demon to thermodynamics"
        },
        {
          "field_a_term": "fluctuation theorem ratio P_F(W)/P_R(-W) (thermodynamics)",
          "field_b_term": "likelihood ratio / Bayes factor (information theory)",
          "note": "The Crooks relation is a statistical likelihood ratio for thermodynamic work fluctuations"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevE.60.2721",
          "note": "Crooks (1999) - entropy production fluctuation theorem and Jarzynski equality"
        },
        {
          "doi": "10.1103/PhysRevLett.78.2690",
          "note": "Jarzynski (1997) - nonequilibrium equality for free energy differences"
        },
        {
          "doi": "10.1038/nature10872",
          "note": "Berut et al. (2012) - experimental verification of Landauer's principle"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/statistical-mechanics-information-theory/b-stochastic-thermodynamics-fluctuation-theorems.yaml"
    },
    {
      "id": "b-kramers-moyal-expansion-x-tumor-phenotype-transition-modeling",
      "title": "Kramers-Moyal moment expansions can transfer from stochastic physics to tumor phenotype transition models.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Kramers-Moyal moment expansions can transfer from stochastic physics to tumor phenotype transition models.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-kramers-moyal-surrogates-improve-tumor-state-transition-forecast-calibration"
      ],
      "communication_gap": "Domain-specific vocabularies and benchmark conventions obscure transferable mathematical structure.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1098/rsta.1922.0009",
          "note": "Classical stochastic-process formalism underpinning diffusion approximations."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/statistical-physics-oncology/b-kramers-moyal-expansion-x-tumor-phenotype-transition-modeling.yaml"
    },
    {
      "id": "b-thermodynamic-uncertainty-relation-x-estimation-precision",
      "title": "Thermodynamic uncertainty relations connect entropy production budgets to lower bounds on estimator variance in nonequilibrium biochemical sensing.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Thermodynamic uncertainty relations (TURs) bound current fluctuations by dissipation, implying that high-precision nonequilibrium sensing requires energetic cost. This maps directly to statistical efficiency language: variance reduction has a minimum entropy-production price in driven systems.\n",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-tur-constrained-estimators-predict-atp-cost-precision-frontier"
      ],
      "communication_gap": "Statistical inference workflows optimize variance and bias, while biophysical studies report ATP or entropy costs separately; TUR gives a shared quantitative bridge.\n",
      "translation_table": [
        {
          "field_a_term": "entropy production rate",
          "field_b_term": "precision-cost tradeoff in estimator design",
          "note": "Lower relative variance requires larger dissipation budget."
        },
        {
          "field_a_term": "integrated current fluctuation bounds",
          "field_b_term": "confidence interval floor under finite-time sampling",
          "note": "TUR imposes irreducible uncertainty at fixed energetic throughput."
        },
        {
          "field_a_term": "nonequilibrium steady-state currents",
          "field_b_term": "biased but low-variance biochemical readout channels",
          "note": "Energetic drive can improve speed/precision but never for free."
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRevLett.114.158101",
          "note": "Barato and Seifert (2015), thermodynamic uncertainty relation for biomolecular processes."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/statistical-physics-statistics/b-thermodynamic-uncertainty-relation-x-estimation-precision.yaml"
    },
    {
      "id": "b-fisher-information-evolution",
      "title": "R.A. Fisher's fundamental theorem of natural selection and his Fisher information matrix in statistics are the same mathematical object — the rate of increase of mean fitness equals the population's statistical Fisher information about fitness, and this identity gives evolutionary biology the full toolkit of statistical estimation theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "R.A. Fisher invented both: (a) the Fisher information matrix I(theta) in statistics (1925) — the expected curvature of the log-likelihood, whose inverse gives the Cramér-Rao lower bound on estimation variance; and (b) the fundamental theorem of natural selection (1930) — the rate of change of mean f",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-fisher-speed-limit-selection",
        "h-quantum-compass-precision"
      ],
      "communication_gap": "Fisher (1930) proved the fundamental theorem in the language of biometry, without reference to his own 1925 information matrix. Ewens, Frank, and Kimura connected the two in the genetics literature, but these papers are technical and not widely read by working evolutionary biologists. Machine learning researchers who daily use the natural gradient are completely unaware that they are implementing an operation mathematically equivalent to natural selection on a fitness landscape. The bridge requires only notation translation — no new mathematics — but the translation has never been made accessible to practitioners in all three fields simultaneously.\n",
      "translation_table": [
        {
          "field_a_term": "Fisher information I(theta)",
          "field_b_term": "additive genetic variance in fitness (V_A)"
        },
        {
          "field_a_term": "Cramér-Rao lower bound (var >= 1/I)",
          "field_b_term": "speed limit on natural selection (dW_bar/dt <= V_A)"
        },
        {
          "field_a_term": "score function (d/dtheta log p(x|theta))",
          "field_b_term": "selection gradient (direction of steepest fitness increase in phenotype space)"
        },
        {
          "field_a_term": "maximum likelihood estimator (achieves Cramér-Rao bound)",
          "field_b_term": "optimal evolutionary strategy (maximises V_A for given environmental uncertainty)"
        },
        {
          "field_a_term": "Fisher information metric (Riemannian metric on probability simplex)",
          "field_b_term": "information geometry of the genotype-frequency distribution under selection"
        },
        {
          "field_a_term": "natural gradient (steepest ascent in Fisher metric)",
          "field_b_term": "natural selection (steepest ascent on fitness landscape corrected for population geometry)"
        },
        {
          "field_a_term": "quantum Fisher information (QFI)",
          "field_b_term": "quantum speed limit on state estimation (Cramér-Rao for quantum measurements)"
        }
      ],
      "references": [
        {
          "doi": "10.1111/j.1469-1809.1930.tb01202.x",
          "note": "Fisher (1930) — The Genetical Theory of Natural Selection; fundamental theorem of natural selection"
        },
        {
          "doi": "10.1214/aoms/1177729694",
          "note": "Fisher (1925) — Theory of statistical estimation; Fisher information matrix and Cramer-Rao bound"
        },
        {
          "doi": "10.1073/pnas.89.7.2912",
          "note": "Frank & Slatkin (1992) — Fisher's fundamental theorem restated in terms of partial regression; explicit connection to variance decomposition"
        },
        {
          "doi": "10.1162/089976698300017746",
          "note": "Amari (1998) — Natural gradient works efficiently in learning; the machine-learning form of the Fisher metric"
        },
        {
          "arxiv": "1412.1193",
          "note": "Martens (2014) — New insights and perspectives on the natural gradient; KFAC connection"
        }
      ],
      "last_reviewed": "2026-05-04",
      "file": "cross-domain/statistics-evolution/b-fisher-information-evolution.yaml"
    },
    {
      "id": "b-deseq2-shrinkage-estimation-x-low-count-clinical-biomarker-surveillance",
      "title": "DESeq2-style shrinkage estimation bridges RNA-seq dispersion modeling and low-count clinical biomarker surveillance.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Empirical-Bayes dispersion shrinkage from RNA-seq analysis can reduce false alerts in low-count clinical biomarker surveillance streams.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-deseq2-style-shrinkage-reduces-false-alerts-in-low-count-clinical-monitoring"
      ],
      "communication_gap": "Genomics pipelines and clinical surveillance systems use similar count statistics but differ in quality-control assumptions and operational thresholds.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1186/s13059-014-0550-8",
          "note": "DESeq2 moderated fold-change and dispersion estimation."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/statistics-medicine/b-deseq2-shrinkage-estimation-x-low-count-clinical-biomarker-surveillance.yaml"
    },
    {
      "id": "b-elastic-net-regularization-x-polygenic-risk-model-stability",
      "title": "Elastic-net regularization links high-dimensional regression theory to clinically deployable polygenic risk modeling.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Elastic-net shrinkage balances sparsity and grouped effects in a way that can stabilize polygenic risk scores across correlated genomic features.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-elastic-net-prs-retraining-with-ancestry-balancing-reduces-calibration-drift"
      ],
      "communication_gap": "Clinical genetics emphasizes cohort transferability while statistical-learning studies emphasize prediction metrics on fixed splits.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1111/j.1467-9868.2005.00503.x",
          "note": "Original elastic-net formulation."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/statistics-medicine/b-elastic-net-regularization-x-polygenic-risk-model-stability.yaml"
    },
    {
      "id": "b-laplace-approximation-x-clinical-trial-adaptive-enrichment",
      "title": "Laplace-approximation workflows can transfer from Bayesian inference to adaptive enrichment in clinical trials.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Laplace-approximation workflows can transfer from Bayesian inference to adaptive enrichment in clinical trials.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-laplace-approximated-interim-rules-improve-enrichment-decision-efficiency"
      ],
      "communication_gap": "Domain-specific vocabularies and benchmark conventions obscure transferable mathematical structure.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1080/01621459.1994.10476795",
          "note": "Classical decision-theoretic benchmark for stopping-rule calibration context."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/statistics-medicine/b-laplace-approximation-x-clinical-trial-adaptive-enrichment.yaml"
    },
    {
      "id": "b-bayesian-inference-stat-mech",
      "title": "The Bayesian normalizing constant (evidence) is formally identical to the statistical-mechanical partition function Z = Σ exp(-E/T); sampling from the posterior is equivalent to sampling from a Gibbs distribution; and MCMC algorithms are molecular dynamics simulations on the posterior energy landscape, making statistical physics and Bayesian inference the same mathematical theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The partition function in statistical mechanics Z = Σ_x exp(-E(x)/kT) normalizes the Boltzmann distribution P(x) = exp(-E(x)/kT)/Z over all configurations x. In Bayesian inference, the posterior P(θ|data) = P(data|θ)P(θ)/Z_Bayes where Z_Bayes = Σ_θ P(data|θ)P(θ) is the marginal likelihood (evidence)",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-variational-bayes-mean-field-equivalence"
      ],
      "communication_gap": "Jaynes (1957) made the MaxEnt-Boltzmann connection explicitly but worked as a physicist writing for physicists. The MCMC literature in statistics developed largely independently of the molecular simulation literature, with Gelfand & Smith (1990) and Geman & Geman (1984) rediscovering the Metropolis algorithm for statistical purposes. MacKay (2003) — Information Theory, Inference and Learning Algorithms — made this bridge central and accessible, but Bayesian computation textbooks rarely cite condensed matter physics.\n",
      "translation_table": [
        {
          "field_a_term": "partition function Z = Σ exp(-E/kT)",
          "field_b_term": "Bayesian evidence (marginal likelihood) Z = Σ P(data|θ)P(θ)",
          "note": "identical mathematical object; computing Z is the central computational challenge in both fields"
        },
        {
          "field_a_term": "Boltzmann distribution P(x) = exp(-E/kT)/Z",
          "field_b_term": "Bayesian posterior P(θ|data) ∝ exp(log likelihood + log prior)",
          "note": "the posterior at T=1 is the Gibbs distribution with E = -log joint"
        },
        {
          "field_a_term": "temperature T (controls distribution breadth)",
          "field_b_term": "inverse regularization strength (1/T posterior = tempered posterior)",
          "note": "T>1 smooths the posterior; T→∞ → uniform; used in parallel tempering MCMC"
        },
        {
          "field_a_term": "Metropolis-Hastings MCMC",
          "field_b_term": "posterior sampling algorithm for Bayesian inference",
          "note": "literally the same algorithm; invented for molecular simulation in 1953"
        },
        {
          "field_a_term": "mean field theory (variational approximation)",
          "field_b_term": "variational Bayes (approximate posterior with factorized q)",
          "note": "minimize variational free energy = minimize KL(q||p) - log Z"
        },
        {
          "field_a_term": "renormalization group (coarse-graining)",
          "field_b_term": "hierarchical Bayesian model (marginalizing fine-grained parameters)"
        }
      ],
      "references": [
        {
          "doi": "10.1103/PhysRev.106.620",
          "note": "Jaynes (1957) — Information theory and statistical mechanics; Phys Rev 106:620"
        },
        {
          "doi": "10.1063/1.1699114",
          "note": "Metropolis et al. (1953) — Equation of state calculations by fast computing machines; J Chem Phys 21:1087"
        },
        {
          "note": "MacKay (2003) — Information Theory, Inference and Learning Algorithms; Cambridge University Press"
        },
        {
          "note": "Mezard & Montanari (2009) — Information, Physics and Computation; Oxford University Press"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/statistics-physics/b-bayesian-inference-stat-mech.yaml"
    },
    {
      "id": "b-optimal-transport-barycenters-x-multiomic-patient-alignment",
      "title": "Optimal-transport barycenters can transfer from distributional geometry to cross-cohort multiomic alignment.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Optimal-transport barycenters can transfer from distributional geometry to cross-cohort multiomic alignment.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-ot-barycenter-alignment-improves-cross-cohort-multiomic-risk-stratification"
      ],
      "communication_gap": "Domain-specific vocabularies and benchmark conventions obscure transferable mathematical structure.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1109/cvpr.2016.90",
          "note": "Representation-transfer context motivating geometry-aware alignment objectives."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/statistics-systems-biology/b-optimal-transport-barycenters-x-multiomic-patient-alignment.yaml"
    },
    {
      "id": "b-optimal-transport-x-single-cell-developmental-lineage-mapping",
      "title": "Optimal transport couplings align probability geometry with developmental lineage inference in single-cell systems.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Entropic optimal transport provides a mathematically coherent bridge between distributional geometry and developmental lineage transitions in single-cell atlases.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-optimal-transport-lineage-couplings-improve-fate-prediction-calibration"
      ],
      "communication_gap": "Single-cell biology emphasizes lineage interpretability while transport theory emphasizes geometric optimality, and shared diagnostics remain immature.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1038/s41586-019-1773-3",
          "note": "Optimal-transport formulation for developmental trajectories."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/statistics-systems-biology/b-optimal-transport-x-single-cell-developmental-lineage-mapping.yaml"
    },
    {
      "id": "b-variational-autoencoders-x-single-cell-latent-state-denoising",
      "title": "Variational autoencoder inference links probabilistic latent-variable modeling with single-cell state denoising.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Variational latent-variable models can separate biological signal from technical noise in sparse single-cell count data.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-beta-vae-regularization-improves-single-cell-state-separability"
      ],
      "communication_gap": "Single-cell pipelines prioritize biological annotation, while VAE literature often prioritizes generic reconstruction and ELBO metrics.",
      "translation_table": [],
      "references": [
        {
          "arxiv": "1312.6114",
          "note": "Auto-Encoding Variational Bayes."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/statistics-systems-biology/b-variational-autoencoders-x-single-cell-latent-state-denoising.yaml"
    },
    {
      "id": "b-markov-jump-processes-x-cell-state-switching-therapy-design",
      "title": "Markov jump process control can transfer from stochastic systems engineering to cell-state switching therapy design.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy: Markov jump process control can transfer from stochastic systems engineering to cell-state switching therapy design.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-markov-jump-therapy-policies-reduce-relapse-prone-cell-state-occupancy"
      ],
      "communication_gap": "Domain-specific vocabularies and benchmark conventions obscure transferable mathematical structure.",
      "translation_table": [],
      "references": [
        {
          "doi": "10.1111/j.1365-246X.2005.02489.x",
          "note": "Adjoint/state-transition methodology as a transferable calibration scaffold."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/stochastic-processes-oncology/b-markov-jump-processes-x-cell-state-switching-therapy-design.yaml"
    },
    {
      "id": "b-synthetic-genetics-xna-alphabet",
      "title": "Xeno-nucleic acids (XNAs) with chemically modified backbones (HNA, CeNA, LNA, FANA, TNA) can store and propagate genetic information through in vitro evolution, demonstrating that the Watson-Crick hydrogen-bonding code is substrate-independent: Darwinian evolution does not require the ribose-phosphate backbone of natural DNA/RNA.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Pinheiro et al. (2012) evolved polymerases capable of transcribing DNA into six different XNAs and back-transcribing XNA into DNA, demonstrating faithful information transfer across the chemical boundary. HNA (hexitol nucleic acid) and FANA (2'F-ANA) aptamers and ribozymes were selected by in vitro ",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-xna-ribozyme-catalytic-efficiency-backbone-independence"
      ],
      "communication_gap": "Synthetic organic chemists developing nucleic acid analogues (LNA, phosphorothioate) for therapeutic applications and evolutionary biologists studying the chemical origins of life rarely collaborate. Pinheiro et al. (2012) bridged these communities in Science, but clinical development of XNA therapeutics and origins-of-life research remain siloed.\n",
      "translation_table": [
        {
          "field_a_term": "Ribose-phosphate backbone (DNA/RNA)",
          "field_b_term": "Hexitol/threose/fluoroarabinose backbone (XNA)",
          "note": "Different backbone; same Watson-Crick base pairing; same information-theoretic capacity"
        },
        {
          "field_a_term": "DNA polymerase (natural template-directed synthesis)",
          "field_b_term": "Engineered XNA polymerase (in vitro selected reverse transcriptase)",
          "note": "XNA polymerases transfer genetic information from XNA to DNA; reverse transcription across chemistries"
        },
        {
          "field_a_term": "Darwinian evolution (mutation + selection + heredity)",
          "field_b_term": "In vitro selection on XNA libraries (SELEX on XNA)",
          "note": "XNA aptamers evolve function through same iterative selection mechanism as natural evolution"
        },
        {
          "field_a_term": "DNA sequence space (4^N configurations)",
          "field_b_term": "XNA sequence space with same four bases; different backbone flexibility",
          "note": "Both implement a 4-letter alphabet; backbone rigidity changes the fitness landscape geometry"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1217622",
          "note": "Pinheiro et al. (2012) Science – genetic encoding and chemical evolution of six XNAs; Darwinian XNA evolution"
        },
        {
          "doi": "10.1038/nature13314",
          "note": "Malyshev et al. (2014) Nature – a semi-synthetic organism with an expanded genetic alphabet (d5SICS/dNaM)"
        },
        {
          "doi": "10.1038/nature13982",
          "note": "Taylor et al. (2015) Nature – catalysts from synthetic genetic polymers; HNA ribozymes"
        },
        {
          "doi": "10.1126/science.1213351",
          "note": "Benner – defining life; synthetic biology and origins of life: genetic letters"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/synthetic-biology-chemistry/b-synthetic-genetics-xna-alphabet.yaml"
    },
    {
      "id": "b-simclr-x-multiomics-latent-alignment",
      "title": "Contrastive representation learning bridges SimCLR invariance objectives and multi-omics latent alignment across assay modalities.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy (to be empirically validated): contrastive objectives that maximize agreement between paired views can align transcriptomic, epigenomic, and proteomic profiles into shared latent coordinates while suppressing batch artifacts.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-contrastive-pretraining-improves-multiomics-transfer-stability"
      ],
      "communication_gap": "Contrastive ML benchmarks emphasize generic transfer, while systems biology requires mechanistic interpretability and assay-aware uncertainty estimates.",
      "translation_table": [
        {
          "field_a_term": "positive pair",
          "field_b_term": "matched multi-omic sample views",
          "note": "Paired modalities define cross-view consistency targets."
        },
        {
          "field_a_term": "augmentation invariance",
          "field_b_term": "batch-effect robustness",
          "note": "Invariance regularization may reduce assay-specific nuisance variation."
        },
        {
          "field_a_term": "temperature-scaled contrastive loss",
          "field_b_term": "modality-separation control",
          "note": "Temperature tunes inter-class versus intra-class compactness."
        }
      ],
      "references": [
        {
          "arxiv": "2002.05709",
          "note": "A Simple Framework for Contrastive Learning of Visual Representations (SimCLR)."
        },
        {
          "url": "https://www.nature.com/articles/s41586-021-03922-0",
          "note": "Representative multi-omics atlas motivation."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/systems-biology-machine-learning/b-simclr-x-multiomics-latent-alignment.yaml"
    },
    {
      "id": "b-carbon-capture-entropy-cost",
      "title": "Direct air carbon capture is constrained by thermodynamics — actual DAC systems consume 10-20× the minimum work set by entropy of mixing, and closing this gap requires understanding sorbent-CO₂ kinetics at the molecular level.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Direct air capture (DAC) of CO₂ from 420 ppm atmosphere (breakthrough gap bg-carbon-direct-air-capture) is fundamentally constrained by the second law of thermodynamics. The minimum work to separate CO₂ from air at concentration x_CO₂ is W_min = RT ln(p_total / p_CO₂) ≈ 20 kJ/mol CO₂ at 420 ppm, 25°",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-mof-sorbent-approaches-dac-thermodynamic-limit"
      ],
      "communication_gap": "DAC is studied by atmospheric scientists (climate impact), chemical engineers (process design), materials scientists (sorbent design), and economists (cost modeling) in largely separate literatures. The thermodynamic framework that unifies them is standard in chemical engineering but rarely articulated for climate scientists or materials researchers entering the field.\n",
      "translation_table": [
        {
          "field_a_term": "entropy of mixing CO₂ in air at 420 ppm",
          "field_b_term": "minimum separation work W_min = RT ln(1/x_CO₂) ≈ 20 kJ/mol",
          "note": "Thermodynamic lower bound — set by atmospheric concentration, unavoidable"
        },
        {
          "field_a_term": "sorbent regeneration temperature T_reg",
          "field_b_term": "Carnot efficiency penalty (1 − T_cold/T_hot)^{-1}",
          "note": "Higher T_reg = more energy for desorption but worse Carnot efficiency"
        },
        {
          "field_a_term": "CO₂ binding enthalpy ΔH_ads of sorbent",
          "field_b_term": "regeneration energy per mol CO₂ ≈ ΔH_ads + sensible heat",
          "note": "Strong binding captures CO₂ efficiently but requires more energy to release"
        },
        {
          "field_a_term": "sorbent degradation rate (oxidative, hydrolytic)",
          "field_b_term": "levelized cost of DAC ($/tonne CO₂)",
          "note": "Sorbent lifetime dominates capital cost at current TRL"
        },
        {
          "field_a_term": "atmospheric CO₂ concentration (ppm)",
          "field_b_term": "W_min sensitivity: doubling concentration lowers W_min by RT ln 2 ≈ 1.7 kJ/mol",
          "note": "At 840 ppm W_min ≈ 18 kJ/mol — modest reduction from concentration increase"
        }
      ],
      "references": [
        {
          "note": "House et al. (2011) Economic and energetic analysis of capturing CO₂ from ambient air. Energy Environ Sci 4:1116-1128"
        },
        {
          "note": "Fasihi et al. (2019) Techno-economic assessment of CO₂ direct air capture plants. Joule 3:1983-2049"
        },
        {
          "note": "Keith et al. (2018) A process for capturing CO₂ from the atmosphere. Joule 2:1573-1594"
        },
        {
          "note": "Lackner (2009) Capture of carbon dioxide from ambient air. Eur Phys J Special Topics 176:93-106"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/thermodynamics-atmospheric-chemistry/b-carbon-capture-entropy-cost.yaml"
    },
    {
      "id": "b-maxwells-demon-computation",
      "title": "Maxwell's demon is resolved by Landauer's principle — erasing one bit of information dissipates at least kT ln 2 of energy, exactly linking Shannon information entropy to thermodynamic entropy and establishing the physical cost of logical irreversibility.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Maxwell's demon (1867): a hypothetical being that monitors individual molecules in a partitioned gas container, opening a small door to let fast molecules pass to one side and slow ones to the other. Net result: the gas unmixes, entropy decreases, and work is seemingly extracted from a heat bath — v",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-reversible-computing-landauer-limit"
      ],
      "communication_gap": "The Maxwell's demon paradox was a 94-year mystery (1867-1961) because physicists treated it as a thermodynamics problem, while the information-theoretic resolution required importing Shannon's 1948 concepts into physics. Landauer's insight was not immediately accepted — the explicit experimental confirmation only came in 2012. Computer science curricula rarely teach thermodynamic limits; thermodynamics courses rarely discuss logical reversibility. The connection between von Neumann's information-theoretic entropy and thermodynamic entropy, first noted by von Neumann and Shannon themselves, is still not universally acknowledged as exact rather than analogical.\n",
      "translation_table": [
        {
          "field_a_term": "Maxwell's demon's memory bit",
          "field_b_term": "one bit in a computational register"
        },
        {
          "field_a_term": "memory erasure (demon resets to accept new measurement)",
          "field_b_term": "logically irreversible operation (bit reset, AND gate)"
        },
        {
          "field_a_term": "heat dissipated by erasure k_B T ln 2 per bit",
          "field_b_term": "minimum energy cost of irreversible computation (Landauer bound)"
        },
        {
          "field_a_term": "thermodynamic entropy S = k_B ln Ω",
          "field_b_term": "Shannon information entropy H = -Σ p_i log₂ p_i (S = k_B ln 2 · H)"
        },
        {
          "field_a_term": "reversible thermodynamic process (quasi-static, no entropy production)",
          "field_b_term": "logically reversible computation (Toffoli gate, Fredkin gate)"
        },
        {
          "field_a_term": "irreversible mixing of gas",
          "field_b_term": "irreversible logical operation (information loss = entropy production)"
        },
        {
          "field_a_term": "second law of thermodynamics (ΔS_universe ≥ 0)",
          "field_b_term": "Landauer bound (every irreversible bit operation costs ≥ k_B T ln 2)"
        },
        {
          "field_a_term": "Szilard engine (1 bit = one Maxwell's demon measurement)",
          "field_b_term": "single-bit memory cell (DRAM bit / flip-flop)"
        }
      ],
      "references": [
        {
          "note": "Maxwell (1871) Theory of Heat. Longmans, Green, London. (Chapter 12, demon described)"
        },
        {
          "doi": "10.1147/rd.53.0183",
          "note": "Landauer (1961) IBM J Res Dev 5:183 — Landauer's principle, erasure costs k_B T ln 2"
        },
        {
          "doi": "10.1147/rd.173.0525",
          "note": "Bennett (1973) IBM J Res Dev 17:525 — reversible computation generates no heat"
        },
        {
          "doi": "10.1038/nature10872",
          "note": "Bérut et al. (2012) Nature — experimental verification of Landauer's bound"
        },
        {
          "doi": "10.1063/PT.3.2912",
          "note": "Lutz & Ciliberto (2015) Physics Today — review of Landauer's principle experiments"
        }
      ],
      "last_reviewed": "2026-05-06",
      "file": "cross-domain/thermodynamics-computer-science/b-maxwells-demon-computation.yaml"
    },
    {
      "id": "b-urban-morphology-fractal-dimension-scaling",
      "title": "Urban morphology — the spatial structure of cities — exhibits fractal scaling: street networks, building footprints, and population density follow power-law distributions with fractal dimensions D ≈ 1.7-1.9, and Zipf's law governs city size distributions; these are explained by growth processes analogous to diffusion-limited aggregation and preferential attachment in complex network theory.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "The fractal dimension of an urban boundary is measured by box-counting: N(ε) ∝ ε^{-D} where N = number of boxes of size ε needed to cover the boundary. For cities, D ≈ 1.7 (London), 1.8 (Tokyo), compared to D = 2 for a completely filled plane. Urban scaling laws (Bettencourt 2013): Y ∝ N^β where Y i",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-optimal-city-structure-maximizes-innovation-minimizes-infrastructure"
      ],
      "communication_gap": "Urban planners and geographers studying city morphology have used fractal analysis since the 1990s (Batty, Longley), while physicists and mathematicians studying scaling laws independently developed city scaling theory (Bettencourt, West); the communities now overlap but the mechanistic derivation of urban scaling from first principles of social interaction remains an active challenge spanning both groups.\n",
      "translation_table": [
        {
          "field_a_term": "urban sprawl vs compact city (urban science)",
          "field_b_term": "fractal dimension D (close to 2 = compact, close to 1 = sparse) (mathematics)",
          "note": "Fractal dimension D quantifies urban compactness; D → 2 approaches solid filled geometry"
        },
        {
          "field_a_term": "city size distribution / rank-size rule (urban science)",
          "field_b_term": "Zipf's law / Pareto distribution with exponent 1 (mathematics)",
          "note": "The top 1000 cities follow P(r) ∝ r^{-1} with remarkable empirical regularity"
        },
        {
          "field_a_term": "urban scaling exponent β > 1 for innovation (urban science)",
          "field_b_term": "superlinear power-law scaling in complex networks (mathematics)",
          "note": "Cities superlinearly amplify ideas/patents because social networks scale as N^β with β > 1"
        },
        {
          "field_a_term": "self-similar street network (urban science)",
          "field_b_term": "fractal network with scale-free properties (mathematics)",
          "note": "Street networks in organically grown cities show fractal self-similarity across scales"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.1235823",
          "note": "Bettencourt (2013) - the origins of scaling in cities (superlinear scaling theory)"
        },
        {
          "doi": "10.1068/b1489",
          "note": "Batty & Longley (1994) - fractal cities (foundational urban fractal analysis)"
        },
        {
          "doi": "10.1016/0960-0779(91)90048-8",
          "note": "Frankhauser (1991) - fractal dimension of urban boundary as morphology measure"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/urban-science-mathematics/b-urban-morphology-fractal-dimension-scaling.yaml"
    },
    {
      "id": "b-viral-evolution-quasispecies-fitness-landscape",
      "title": "RNA virus populations evolve as quasispecies — clouds of mutant sequences near a fitness landscape peak — a concept borrowed from the physics of spin glasses and applied to virology, explaining error catastrophe, lethal mutagenesis, and immune escape.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Eigen's quasispecies equation describes an RNA virus population as a distribution over sequence space: ẋᵢ = Σⱼ Wᵢⱼ xⱼ − Φxᵢ, where Wᵢⱼ is the mutation-selection matrix and Φ normalizes the population. The dominant eigenvector (master sequence + mutant cloud) is the quasispecies. Above the error thre",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-lethal-mutagenesis-quasispecies-antiviral-strategy"
      ],
      "communication_gap": "Virologists and evolutionary biologists are familiar with quasispecies conceptually, but the mathematical connection to eigenvalue problems and spin-glass physics is underappreciated; most virologists use quasispecies as a metaphor rather than applying the Eigen equation quantitatively.\n",
      "translation_table": [
        {
          "field_a_term": "RNA virus mutation rate per base per replication (virology)",
          "field_b_term": "error rate in quasispecies / transition matrix entry (evolutionary biology)",
          "note": "RNA polymerase lacks proofreading; μ ≈ 10⁻⁴ per base is near the error threshold"
        },
        {
          "field_a_term": "dominant viral sequence / consensus sequence (virology)",
          "field_b_term": "master sequence (dominant eigenvector of mutation-selection matrix) (evolutionary biology)",
          "note": "Consensus ≠ master sequence when the mutant cloud has a different mean fitness"
        },
        {
          "field_a_term": "lethal mutagenesis by mutagens (ribavirin) (virology)",
          "field_b_term": "driving the population above the error threshold (evolutionary biology)",
          "note": "Increasing μ beyond μ_crit destroys the quasispecies — basis of antiviral strategy"
        },
        {
          "field_a_term": "immune escape variant selection (virology)",
          "field_b_term": "adaptive walk on rugged fitness landscape (evolutionary biology)",
          "note": "Immune pressure shifts the fitness landscape; the quasispecies cloud rapidly adapts"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF00623322",
          "note": "Eigen (1971) — self-organization of matter and evolution of biological macromolecules"
        },
        {
          "doi": "10.1016/j.cell.2006.05.049",
          "note": "Domingo et al. (2006) — quasispecies dynamics and RNA virus biology"
        },
        {
          "doi": "10.1371/journal.ppat.1000005",
          "note": "Perales et al. (2010) — lethal mutagenesis and the fate of RNA viruses"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/virology-evolutionary-biology/b-viral-evolution-quasispecies-fitness-landscape.yaml"
    },
    {
      "id": "b-viral-quasispecies-x-nk-rugged-landscape",
      "title": "Viral quasispecies theory treats mutant clouds as error-prone replication distributions shifting across fitness ridges — sharing landscape metaphors with Kauffman NK models where epistatic coupling creates rugged fitness surfaces with many local optima — enabling borrowings between virology escape pathways and combinatorial optimization rhetoric used in evolutionary computation.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Eigen quasispecies equations describe evolution of genotype frequencies under mutation–selection balance — equilibrium structures resemble discrete landscape climbs with mutation allowing valley crossing analogous to noise-assisted transitions on rugged NK surfaces (informal analogy; viral biology a",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-viral-quasispecies-x-nk-rugged-landscape"
      ],
      "communication_gap": "Virology papers cite empirical escape mutations without NK formalism; evolutionary computation NK papers seldom cite clinical antibody breadth assays validating ruggedness claims on specific viral proteins.\n",
      "translation_table": [
        {
          "field_a_term": "Mutation rate μ per replication (virology)",
          "field_b_term": "Neighborhood radius in landscape exploration heuristics (NK / evolutionary algorithms)",
          "note": "Higher μ broadens cloud width akin to larger exploratory neighborhoods only qualitatively."
        },
        {
          "field_a_term": "Error catastrophe threshold (Eigen)",
          "field_b_term": "Loss of localization on landscapes under excessive noise in adaptive walks",
          "note": "Conceptual alignment — distinct quantitative thresholds differ by orders of magnitude across domains."
        },
        {
          "field_a_term": "Fitness valleys between antibody-selected peaks",
          "field_b_term": "Low fitness NK saddles separating local optima",
          "note": "Shared narrative for escape pathway planning in vaccine design discussions with computational metaphor caution."
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF00643356",
          "note": "Eigen & Schuster (1977) — quasispecies error threshold foundations"
        },
        {
          "doi": "10.1016/S0022-5193(87)80219-0",
          "note": "Kauffman & Levin (1987) J. Theor. Biol. — NK rugged fitness landscapes"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/virology-evolutionary-biology/b-viral-quasispecies-x-nk-rugged-landscape.yaml"
    },
    {
      "id": "b-viral-quasispecies-error-threshold",
      "title": "RNA virus populations exist as quasispecies clouds near an error threshold defined by information theory: exceeding the critical mutation rate causes mutational meltdown, making the Eigen quasispecies equations a direct application of Shannon channel capacity to molecular evolution.\n",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Eigen's quasispecies theory maps RNA virus evolution onto an information-theoretic error-correction problem: the master sequence is the optimal codeword, replication fidelity is the channel capacity, and the error threshold U_c = ln(fitness ratio) is the maximum mutation rate before the population l",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-lethal-mutagenesis-antiviral-threshold"
      ],
      "communication_gap": "Virologists study quasispecies empirically through sequencing while information theorists rarely engage with molecular evolution; the mathematical equivalence formalized by Eigen and Schuster is well-known in theoretical biology but underutilized in clinical virology and antiviral drug design.\n",
      "translation_table": [
        {
          "field_a_term": "quasispecies distribution (virology)",
          "field_b_term": "stationary distribution of a Markov mutation-selection chain (information theory)",
          "note": "The quasispecies is the dominant eigenvector of the fitness-mutation matrix W"
        },
        {
          "field_a_term": "error threshold U_c (virology)",
          "field_b_term": "Shannon channel capacity C = max I(X;Y) (information theory)",
          "note": "Exceeding U_c is analogous to exceeding channel capacity; information is irretrievably lost"
        },
        {
          "field_a_term": "master sequence / wild-type (virology)",
          "field_b_term": "codeword in an error-correcting code (information theory)",
          "note": "The master sequence is maintained by purifying selection analogous to error correction"
        },
        {
          "field_a_term": "lethal mutagenesis via mutagens (virology)",
          "field_b_term": "channel noise exceeding capacity (information theory)",
          "note": "Ribavirin and favipiravir increase mutation rate beyond the error threshold"
        }
      ],
      "references": [
        {
          "doi": "10.1007/BF00623322",
          "note": "Eigen (1971) - original quasispecies theory and error threshold concept"
        },
        {
          "doi": "10.1038/nature04244",
          "note": "Domingo & Holland (2005) - RNA virus mutations and fitness"
        },
        {
          "doi": "10.1128/JVI.00361-10",
          "note": "Perales et al. (2010) - lethal mutagenesis of HIV-1 with mutagenic nucleosides"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/virology-information-theory/b-viral-quasispecies-error-threshold.yaml"
    },
    {
      "id": "b-protein-language-model-x-viral-escape-fitness-landscape",
      "title": "Protein language-model priors bridge sequence representation learning and viral escape fitness landscape forecasting.",
      "source_domain": "",
      "target_domain": "",
      "status": "proposed",
      "bridge_claim": "Speculative analogy (to be empirically validated): Protein language-model likelihoods can serve as soft constraints on viable mutational trajectories similarly to fitness-landscape priors used in viral escape forecasting.",
      "open_unknowns": [],
      "related_hypotheses": [
        "h-protein-language-model-priors-improve-viral-escape-forecasting"
      ],
      "communication_gap": "Domain operators prioritize interpretable reliability diagnostics, while ML work often prioritizes aggregate accuracy without deployment-grade uncertainty audits.",
      "translation_table": [
        {
          "field_a_term": "model prior",
          "field_b_term": "domain prior",
          "note": "Both constrain inference in data-sparse regimes."
        },
        {
          "field_a_term": "uncertainty estimate",
          "field_b_term": "risk-aware decision support",
          "note": "Uncertainty quality determines practical utility."
        },
        {
          "field_a_term": "out-of-distribution behavior",
          "field_b_term": "deployment robustness",
          "note": "Shift sensitivity governs real-world reliability."
        }
      ],
      "references": [
        {
          "arxiv": "2006.10555",
          "note": "Protein sequence language-model foundation."
        }
      ],
      "last_reviewed": "2026-05-08",
      "file": "cross-domain/virology-machine-learning/b-protein-language-model-x-viral-escape-fitness-landscape.yaml"
    },
    {
      "id": "b-magma-fragmentation-rheology",
      "title": "Explosive volcanic eruptions occur when magma fragmentation transitions from ductile to brittle as ascent rate exceeds the structural relaxation time of silicate melt, quantified by the Deborah number De = τ_relax / τ_deform comparing melt viscosity timescale to deformation rate",
      "source_domain": "",
      "target_domain": "",
      "status": "established",
      "bridge_claim": "Magma rheology controls eruptive style: when the Deborah number De = η(T,X) / (G_∞ * τ_deform) < 1, melt flows viscously (effusive eruption); when De > 1, melt behaves brittlely and fragments explosively, releasing stored elastic energy; the fragmentation threshold depends on silica content (control",
      "open_unknowns": [],
      "related_hypotheses": [],
      "communication_gap": "Volcanologists focus on field observations of eruptive products and seismic precursors while fluid mechanicists study non-Newtonian flows in industrial contexts; the shared Deborah number framework is known in theoretical volcanology but not broadly applied in engineering fluid mechanics.",
      "translation_table": [
        {
          "field_a_term": "explosive vs effusive eruption style (volcanology)",
          "field_b_term": "brittle vs ductile rheological regime (fluid mechanics)",
          "note": "Eruption style determined by whether De exceeds the fragmentation threshold ~ 0.01"
        },
        {
          "field_a_term": "magma viscosity η (volcanology)",
          "field_b_term": "non-Newtonian melt viscosity from silicate network structure (fluid mechanics)",
          "note": "η spans 10^2–10^14 Pa·s depending on temperature, silica, and water content"
        },
        {
          "field_a_term": "pyroclastic fragmentation depth (volcanology)",
          "field_b_term": "location in conduit where De crosses fragmentation threshold (fluid mechanics)",
          "note": "Fragmentation depth determines explosion energy and grain size distribution"
        },
        {
          "field_a_term": "bubble nucleation and growth in ascending magma (volcanology)",
          "field_b_term": "two-phase flow with phase change in a pressure-driven conduit (fluid mechanics)",
          "note": "Vesiculation drives volatile exsolution, modifying local De and triggering fragmentation"
        }
      ],
      "references": [
        {
          "doi": "10.1126/science.283.5397.85",
          "note": "Dingwell (1996) Science - brittle failure of viscous silicate melts and fragmentation"
        },
        {
          "doi": "10.1016/j.jvolgeores.2004.09.001",
          "note": "Papale (1999) - strain-rate fragmentation model for explosive eruptions"
        },
        {
          "doi": "10.1038/nature04153",
          "note": "Gonnermann & Manga (2007) - magma rheology and volcanic eruption dynamics"
        }
      ],
      "last_reviewed": "2026-05-07",
      "file": "cross-domain/volcanology-fluid-mechanics/b-magma-fragmentation-rheology.yaml"
    }
  ]
}