@prefix :       <https://deanofbigdatadobd.substack.com/p/part-6-your-action-plan-for-autonomous#> .
@prefix schema: <https://schema.org/> .
@prefix skos:   <http://www.w3.org/2004/02/skos/core#> .
@prefix rdfs:   <http://www.w3.org/2000/01/rdf-schema#> .
@prefix rdf:    <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix owl:    <http://www.w3.org/2002/07/owl#> .
@prefix xsd:    <http://www.w3.org/2001/XMLSchema#> .
@prefix dbo:    <http://dbpedia.org/ontology/> .
@prefix org:    <http://www.w3.org/ns/org#> .
@prefix foaf:   <http://xmlns.com/foaf/0.1/> .

# ─────────────────────────────────────────────
# Lightweight Ontology
# ─────────────────────────────────────────────

# --- Ontology classes ---
# Each of the article's eight core concepts is typed as an :AgentConcept,
# either directly or via the :IntelligenceConcept / :GovernanceConcept split.

:AgentConcept a rdfs:Class ;
    rdfs:label "Agent Concept"@en ;
    rdfs:comment "Base class for the eight core concepts governing autonomous AI agent design."@en .

# Concepts 1-4 ("what makes the agent smart").
:IntelligenceConcept a rdfs:Class ;
    rdfs:subClassOf :AgentConcept ;
    rdfs:label "Intelligence Concept"@en ;
    rdfs:comment "Concepts that determine what makes an autonomous agent smart."@en .

# Concepts 5-8 ("what makes the agent trustworthy").
:GovernanceConcept a rdfs:Class ;
    rdfs:subClassOf :AgentConcept ;
    rdfs:label "Governance Concept"@en ;
    rdfs:comment "Concepts that determine what makes an autonomous agent trustworthy."@en .

# Instances: :phase1, :phase2, :phase3 (the three-phase deployment checklist).
:DeploymentPhase a rdfs:Class ;
    rdfs:label "Deployment Phase"@en ;
    rdfs:comment "A phase of the autonomous AI agent deployment checklist."@en .

# Instances: :harmNuisance, :harmSerious, :harmCatastrophic.
:HarmLevel a rdfs:Class ;
    rdfs:label "Harm Level"@en ;
    rdfs:comment "A severity classification on the Spectrum of Harm for autonomous AI failures."@en .

# Instances: :trustSecurity, :trustPrivacy, :trustIdentity, :trustAccountability, :trustContext.
:TrustFoundation a rdfs:Class ;
    rdfs:label "Trust Foundation"@en ;
    rdfs:comment "One of the five foundational pillars required for trustworthy agent governance."@en .

# --- Ontology properties ---
# Each property is additionally typed owl:DatatypeProperty so OWL-aware
# tooling classifies it correctly; the original rdf:Property typing is kept
# for plain-RDFS consumers. rdfs:comment added to document intent.

:hasConceptNumber a rdf:Property, owl:DatatypeProperty ;
    rdfs:label "has concept number"@en ;
    rdfs:comment "Position (1-8) of a concept within the eight-concept framework."@en ;
    rdfs:domain :AgentConcept ;
    rdfs:range xsd:integer .

:hasChecklistQuestion a rdf:Property, owl:DatatypeProperty ;
    rdfs:label "has checklist question"@en ;
    rdfs:comment "A readiness question attached to a deployment phase; each phase carries four."@en ;
    rdfs:domain :DeploymentPhase ;
    rdfs:range xsd:string .

:hasHarmSeverity a rdf:Property, owl:DatatypeProperty ;
    rdfs:label "has harm severity"@en ;
    rdfs:comment "Severity code (Low, Medium, or High) for a harm level, as a plain xsd:string."@en ;
    rdfs:domain :HarmLevel ;
    rdfs:range xsd:string .

# ─────────────────────────────────────────────
# Main Article
# ─────────────────────────────────────────────

# The main article node: schema.org metadata for Part 6 of the series.
# schema:about links the four content entities modeled below; schema:hasPart
# links the FAQ, glossary, and HowTo sub-structures.
:article a schema:Article ;
    schema:name "Part 6: Your Action Plan for Autonomous AI Agents"@en ;
    schema:headline "Part 6: Your Action Plan for Autonomous AI Agents"@en ;
    schema:description "Governance: build it once and it compounds. Skip it and you pay for every failure… forever"@en ;
    schema:url "https://deanofbigdatadobd.substack.com/p/part-6-your-action-plan-for-autonomous" ;
    schema:datePublished "2026-04-27"^^xsd:date ;
    schema:dateModified "2026-04-27"^^xsd:date ;
    schema:inLanguage "en" ;
    schema:isPartOf :series ;
    schema:author :author ;
    schema:publisher :publisher ;
    schema:about :framework, :deploymentChecklist, :callahansExample, :economicArgument ;
    schema:keywords "autonomous AI agents, governance, AI safety, Entity Propensity Models, AI Utility Function, Spectrum of Harm, trust, deployment checklist"@en ;
    schema:isAccessibleForFree true ;
    schema:image :heroImage ;
    schema:hasPart :faqSection, :glossarySection, :howtoSection .

# Hero image; width/height in pixels per the 960x540 asset in the URL.
:heroImage a schema:ImageObject ;
    schema:url "https://substackcdn.com/image/fetch/f_auto,q_auto:good,fl_progressive:steep/https%3A%2F%2Fsubstack-post-media.s3.amazonaws.com%2Fpublic%2Fimages%2Fe761aa39-8afa-4cb4-8333-31f6b459415f_960x540.png" ;
    schema:width "960"^^xsd:integer ;
    schema:height "540"^^xsd:integer .

# ─────────────────────────────────────────────
# Blog Series
# ─────────────────────────────────────────────

# The series container. A schema:BlogPosting denotes a single post; the
# object that *contains* posts is a schema:Blog, so the series is typed as
# such. Individual parts remain BlogPosting and now carry schema:isPartOf
# back-links, matching how :article links to :series.
:series a schema:Blog ;
    schema:name "Autonomous AI Agents Blog Series"@en ;
    schema:description "A multi-part series exploring autonomous AI agent design, intelligence, governance, and deployment, illustrated through the Callahan family vacation use case."@en ;
    schema:hasPart :part1, :part2, :part3, :part4, :part5, :article .

:part1 a schema:BlogPosting ;
    schema:name "Part 1: The Vacation That Planned Itself"@en ;
    schema:url "https://deanofbigdatadobd.substack.com/p/the-vacation-that-planned-itself" ;
    schema:isPartOf :series .

# NOTE(review): parts 2-5 lack schema:url in the source data — confirm the
# post URLs and add them.
:part2 a schema:BlogPosting ;
    schema:name "Part 2: Why Most AI Agents Get It Wrong"@en ;
    schema:isPartOf :series .

:part3 a schema:BlogPosting ;
    schema:name "Part 3: The AI Utility Function"@en ;
    schema:isPartOf :series .

:part4 a schema:BlogPosting ;
    schema:name "Part 4: Governance Boundaries"@en ;
    schema:isPartOf :series .

:part5 a schema:BlogPosting ;
    schema:name "Part 5: Compounding Value Over Time"@en ;
    schema:isPartOf :series .

# ─────────────────────────────────────────────
# Author & Publisher
# ─────────────────────────────────────────────

# The author, with external identity links for entity resolution.
:author a schema:Person ;
    schema:name "Dean of Big Data"@en ;
    schema:alternateName "Bill Schmarzo"@en ;
    schema:url "https://substack.com/@deanofbigdata" ;
    schema:description """Teacher, Author, Chief Data Monetization Officer, USF Executive Fellow, NUI Galway Honorary Professor, Iowa State University Professor, AI Customer Innovation Specialist at Dell Technologies."""@en ;
    # identifier (string) and owl:sameAs (resource) both point at the same
    # Twitter account.
    schema:identifier "https://twitter.com/schmarzo" ;
    owl:sameAs <https://twitter.com/schmarzo> ;
    rdfs:seeAlso <http://dbpedia.org/resource/Big_data> .

# The Substack newsletter acting as publisher of the article.
:publisher a schema:Organization ;
    schema:name "Dean of Big Data Newsletter"@en ;
    schema:url "https://deanofbigdatadobd.substack.com" ;
    schema:description "Learn how to leverage data and analytics to power your business model."@en ;
    schema:identifier "https://deanofbigdatadobd.substack.com" .

# ─────────────────────────────────────────────
# The Framework: Eight Concepts
# ─────────────────────────────────────────────

# The umbrella framework node. Its eight parts are the four intelligence
# concepts below plus the four governance concepts in the next section;
# ordering is carried by :hasConceptNumber on each concept.
:framework a schema:CreativeWork ;
    schema:name "The Eight-Concept Framework for Autonomous AI Agents"@en ;
    schema:description """Every failure in autonomous AI agent deployments can be traced to one or more of these eight concepts being absent or misconfigured. Four govern intelligence; four govern trustworthiness."""@en ;
    schema:hasPart :epm, :elm, :aiUtilityFunction, :learningConstructs,
                   :threePractitionerQuestions, :fiveTrustFoundations,
                   :spectrumOfHarm, :fullValueLedger .

# Intelligence Concepts

:epm a :IntelligenceConcept ;
    :hasConceptNumber 1 ;
    schema:name "Entity Propensity Models (EPMs)"@en ;
    schema:description """Causal models of individual behavior, not population averages. They answer why this person behaves the way they do, and what changes when conditions change. Without them, the agent serves the average. Nobody is average."""@en ;
    schema:alternateName "EPMs"@en ;
    rdfs:seeAlso <http://dbpedia.org/resource/Causal_model> .

:elm a :IntelligenceConcept ;
    :hasConceptNumber 2 ;
    schema:name "Entity Language Models (ELMs)"@en ;
    schema:description """The contextual communication layer. Interprets EPM intelligence and adapts tone, detail, and framing to each person in each moment. Also listens: every override and non-response is a learning signal."""@en ;
    schema:alternateName "ELMs"@en ;
    rdfs:seeAlso <http://dbpedia.org/resource/Language_model> .

# Concept 3 decomposes into the five value-dimension nodes that follow.
:aiUtilityFunction a :IntelligenceConcept ;
    :hasConceptNumber 3 ;
    schema:name "AI Utility Function"@en ;
    schema:description """The explicit, multi-dimensional definition of what 'good' means across customer, operational, societal, ethical, and environmental value, including second and third-order effects. Without it, the agent optimizes toward whatever is easiest to measure."""@en ;
    schema:hasPart :customerValue, :operationalValue, :societalValue, :ethicalValue, :environmentalValue ;
    rdfs:seeAlso <http://dbpedia.org/resource/Utility_function> .

:customerValue a schema:PropertyValue ;
    schema:name "Customer Value Dimension"@en ;
    schema:description "Value delivered directly to the individual customer being served."@en .

:operationalValue a schema:PropertyValue ;
    schema:name "Operational Value Dimension"@en ;
    schema:description "Value realized through efficiency, cost reduction, and process improvement."@en .

:societalValue a schema:PropertyValue ;
    schema:name "Societal Value Dimension"@en ;
    schema:description "Broader impact on communities and social outcomes."@en .

:ethicalValue a schema:PropertyValue ;
    schema:name "Ethical Value Dimension"@en ;
    schema:description "Alignment with ethical standards and fairness principles."@en .

:environmentalValue a schema:PropertyValue ;
    schema:name "Environmental Value Dimension"@en ;
    schema:description "Impact on environmental sustainability and ecological outcomes."@en .

# Concept 4 decomposes into MPL and the Law of Compounding.
:learningConstructs a :IntelligenceConcept ;
    :hasConceptNumber 4 ;
    schema:name "Learning Constructs"@en ;
    schema:description """The feedback mechanisms that compound value over time. Marginal Propensity to Learn (MPL) measures improvement per interaction. The Law of Compounding turns 1% consistent improvement into a 37.8x advantage over a year."""@en ;
    schema:hasPart :mpl, :lawOfCompounding .

:mpl a schema:PropertyValue ;
    schema:name "Marginal Propensity to Learn (MPL)"@en ;
    schema:alternateName "MPL"@en ;
    schema:description "Measures the rate of improvement the agent achieves per interaction."@en .

:lawOfCompounding a schema:PropertyValue ;
    schema:name "Law of Compounding"@en ;
    schema:description "1% consistent improvement compounded daily yields a 37.8x advantage over one year."@en ;
    schema:value "37.8x annual advantage from 1% daily improvement"@en .

# Governance Concepts

# Governance concepts 5-7 and their constituent parts. (Concept 8, the Full
# Value Ledger, follows the harm-level instances below.)

# Concept 5: three boundary questions, each modeled as a schema:Question.
:threePractitionerQuestions a :GovernanceConcept ;
    :hasConceptNumber 5 ;
    schema:name "Three Practitioner Questions"@en ;
    schema:description """Three questions that must be answered before deployment, encoded into the architecture, and enforced by design — not by intention."""@en ;
    schema:hasPart :questionCanDo, :questionMustAsk, :questionCanNever .

:questionCanDo a schema:Question ;
    schema:name "What can the agent do without asking?"@en ;
    schema:text "What can the agent do without asking?"@en ;
    schema:description "Defines the autonomous action space — decisions the agent may execute without human confirmation."@en .

:questionMustAsk a schema:Question ;
    schema:name "What must the agent ask before acting?"@en ;
    schema:text "What must it ask before acting?"@en ;
    schema:description "Defines the confirmation boundary — decisions requiring explicit human approval before execution."@en .

:questionCanNever a schema:Question ;
    schema:name "What can the agent never do?"@en ;
    schema:text "What can it never do, regardless of who asks?"@en ;
    schema:description "Defines the absolute prohibition space — actions the agent must refuse regardless of instruction source, tone, or authority."@en .

# Concept 6: five pillars, each a :TrustFoundation instance.
:fiveTrustFoundations a :GovernanceConcept ;
    :hasConceptNumber 6 ;
    schema:name "Five Trust Foundations"@en ;
    schema:description "All five are required. None is sufficient alone."@en ;
    schema:hasPart :trustSecurity, :trustPrivacy, :trustIdentity, :trustAccountability, :trustContext .

:trustSecurity a :TrustFoundation ;
    schema:name "Security"@en ;
    schema:description "Enforced authorization boundaries."@en .

:trustPrivacy a :TrustFoundation ;
    schema:name "Privacy"@en ;
    schema:description "Sensitive information restricted by default."@en .

:trustIdentity a :TrustFoundation ;
    schema:name "Identity"@en ;
    schema:description "Verified knowledge of who is authorized."@en .

:trustAccountability a :TrustFoundation ;
    schema:name "Accountability"@en ;
    schema:description "Every significant action is attributable and logged."@en .

:trustContext a :TrustFoundation ;
    schema:name "Context"@en ;
    schema:description "Causal understanding of who is affected and how."@en .

# Concept 7: severity classification; its three levels are instances below.
:spectrumOfHarm a :GovernanceConcept ;
    :hasConceptNumber 7 ;
    schema:name "Spectrum of Harm"@en ;
    schema:description """The same design failures produce Nuisance, Serious, or Catastrophic outcomes depending on the domain and its reversibility. The capability does not change. What changes is whether the damage can be undone."""@en ;
    schema:hasPart :harmNuisance, :harmSerious, :harmCatastrophic .

# Harm-level instances. :hasHarmSeverity is declared with rdfs:range
# xsd:string; in RDF 1.1 a language-tagged literal such as "Low"@en has
# datatype rdf:langString, not xsd:string, which violated the declared
# range — the severity codes are therefore plain (untagged) literals.
:harmNuisance a :HarmLevel ;
    :hasHarmSeverity "Low" ;
    schema:name "Nuisance"@en ;
    schema:description "Outcomes that are inconvenient or annoying but recoverable with minimal cost."@en .

:harmSerious a :HarmLevel ;
    :hasHarmSeverity "Medium" ;
    schema:name "Serious"@en ;
    schema:description "Outcomes with significant negative impact requiring substantial recovery effort."@en .

:harmCatastrophic a :HarmLevel ;
    :hasHarmSeverity "High" ;
    schema:name "Catastrophic"@en ;
    schema:description "Irreversible outcomes in high-stakes domains with permanent or severe consequences."@en .

# Concept 8, closing the governance list; Agent Unit Economics is broken out
# as its own node.
:fullValueLedger a :GovernanceConcept ;
    :hasConceptNumber 8 ;
    schema:name "Full Value Ledger"@en ;
    schema:description """Complete economic accounting across all value dimensions. Agent Unit Economics that counts only compute costs and efficiency gains while ignoring governance failures and unauthorized actions is a partial ledger that systematically undercounts risk."""@en ;
    schema:hasPart :agentUnitEconomics .

:agentUnitEconomics a schema:PropertyValue ;
    schema:name "Agent Unit Economics"@en ;
    schema:description "Financial model that must account for compute costs, maintenance costs, AND governance failure costs to provide a complete economic picture."@en .

# ─────────────────────────────────────────────
# Deployment Checklist
# ─────────────────────────────────────────────

# The checklist as a schema:HowTo: three phases (each a :DeploymentPhase and
# schema:HowToSection, ordered by schema:position), four questions per phase
# attached via :hasChecklistQuestion — twelve questions total.
:deploymentChecklist a schema:HowTo ;
    schema:name "The Deployment Checklist: Three Phases, Twelve Questions"@en ;
    schema:description "A structured sequence for deploying autonomous agents grounded in causal intelligence, aligned to a clear definition of value, and governed by explicit boundaries."@en ;
    schema:step :phase1, :phase2, :phase3 .

:phase1 a :DeploymentPhase, schema:HowToSection ;
    schema:name "Phase 1: Before You Define the Agent"@en ;
    schema:description "The foundational phase where most failures originate. Requires a diverse set of stakeholders and significant time investment."@en ;
    schema:position 1 ;
    :hasChecklistQuestion "Have you identified the specific entities the agent will serve and built causal models of their individual behavioral patterns?" ;
    :hasChecklistQuestion "Have you defined the AI Utility Function explicitly across all value dimensions, in a way specific enough to adjudicate trade-offs?" ;
    :hasChecklistQuestion "Have you determined where on the Spectrum of Harm a failure in this domain lands, and designed accordingly?" ;
    :hasChecklistQuestion "Have you answered the Three Practitioner Questions in writing before any architecture decisions are made?" .

:phase2 a :DeploymentPhase, schema:HowToSection ;
    schema:name "Phase 2: Before You Deploy the Agent"@en ;
    schema:description "Architecture-level governance validation before the agent touches real-world systems."@en ;
    schema:position 2 ;
    :hasChecklistQuestion "Are all Five Trust Foundations built into the architecture: Security, Privacy, Identity, Accountability, and Context?" ;
    :hasChecklistQuestion "Are authorization boundaries enforced at execution time, not just documented in a policy?" ;
    :hasChecklistQuestion "Does the agent have a verified, persistent model of who is authorized to instruct it that cannot be overridden by tone, urgency, or claimed authority?" ;
    :hasChecklistQuestion "Have you stress-tested the agent against social engineering scenarios before it touches real-world systems?" .

:phase3 a :DeploymentPhase, schema:HowToSection ;
    schema:name "Phase 3: Before You Scale the Agent"@en ;
    schema:description "Scaling governance to ensure compounding value and multi-agent system safety."@en ;
    schema:position 3 ;
    :hasChecklistQuestion "Are you measuring performance across the Full Value Ledger, not just cost and efficiency?" ;
    :hasChecklistQuestion "Are Learning Constructs functioning: is the agent measurably improving with each interaction, and are human overrides captured as learning signals?" ;
    :hasChecklistQuestion "If this agent operates alongside others, have you assessed how a governance failure in one propagates across the system?" ;
    :hasChecklistQuestion "Does your Agent Unit Economics calculation include the cost of governance failures alongside compute and maintenance costs?" .

# ─────────────────────────────────────────────
# The Callahan Family Case Study
# ─────────────────────────────────────────────

# The running case study and its three entities.
:callahansExample a schema:CreativeWork ;
    schema:name "The Callahan Family Vacation Case Study"@en ;
    schema:description """A running case study throughout the series: five people, five behavioral profiles, five definitions of a good outcome, and one agent trying to serve all of them at once. The agent failure produced a $1,625 vacation disaster."""@en ;
    schema:about :callahans, :vacationAgent, :vacationFailure .

# NOTE(review): schema.org has no "family" type; schema:Person is kept here
# for compatibility even though this node denotes a five-person group —
# confirm whether individual Person nodes are wanted instead.
:callahans a schema:Person ;
    schema:name "The Callahan Family"@en ;
    schema:description "A family of five, each with distinct behavioral profiles and definitions of a 'good' vacation outcome, used as the primary case study for autonomous agent design."@en .

:vacationAgent a schema:SoftwareApplication ;
    schema:name "Vacation Planning Agent"@en ;
    schema:description "The autonomous AI agent tasked with planning a family vacation. Its failure illustrated what happens without EPMs, AI Utility Function, and governance boundaries."@en .

# Event -> topic link uses schema:about (whose domain includes Event);
# the previous schema:offers implied something being offered for sale.
:vacationFailure a schema:Event ;
    schema:name "The $1,625 Vacation Disaster"@en ;
    schema:description "The predictable outcome when an autonomous agent operates without causal intelligence, a clear definition of value, or governance boundaries."@en ;
    schema:about :financialLoss .

# The loss is a schema:MonetaryAmount (value + currency); the previous
# schema:Offer/schema:price modeling denoted a sale offer, not a realized loss.
:financialLoss a schema:MonetaryAmount ;
    schema:value "1625"^^xsd:decimal ;
    schema:currency "USD" ;
    schema:description "Financial loss resulting from the autonomous agent's failure to account for individual behavioral profiles."@en .

# ─────────────────────────────────────────────
# Economic Argument
# ─────────────────────────────────────────────

# The proactive-vs-reactive comparison, modeled as two schema:PropertyValue
# nodes each carrying a one-line schema:value summary.
:economicArgument a schema:CreativeWork ;
    schema:name "The Economic Argument for Proactive Governance"@en ;
    schema:description "Organizations that build governance from the start make the investment once and inherit the benefit across every deployment, compounding advantage over time. Organizations that govern reactively pay for every failure individually."@en ;
    schema:hasPart :proactiveGovernance, :reactiveGovernance .

:proactiveGovernance a schema:PropertyValue ;
    schema:name "Proactive Governance"@en ;
    schema:description "Build governance from the start: one-time investment that compounds advantage across every deployment."@en ;
    schema:value "Pay once, compound advantage forever"@en .

:reactiveGovernance a schema:PropertyValue ;
    schema:name "Reactive Governance"@en ;
    schema:description "Govern reactively: pay for every failure individually with no framework to prevent the next one, and escalating liability as agents scale into higher-stakes domains."@en ;
    schema:value "Pay per failure, escalating liability"@en .

# ─────────────────────────────────────────────
# FAQ Section (schema:FAQPage)
# ─────────────────────────────────────────────

# FAQ as a schema:FAQPage. Every question follows the same pattern:
# schema:name (short form), schema:text (full form as asked), and a
# blank-node schema:Answer attached via schema:acceptedAnswer.
:faqSection a schema:FAQPage ;
    schema:name "Frequently Asked Questions: Autonomous AI Agent Action Plan"@en ;
    schema:mainEntity :q1, :q2, :q3, :q4, :q5, :q6, :q7, :q8, :q9, :q10, :q11, :q12 .

:q1 a schema:Question ;
    schema:name "Why do autonomous AI agents fail?"@en ;
    schema:text "Why do autonomous AI agents fail?"@en ;
    schema:acceptedAnswer [
        a schema:Answer ;
        schema:text "Autonomous AI agents do not fail because the technology is not capable. They fail because the architecture is incomplete — specifically, because one or more of the eight core concepts (EPMs, ELMs, AI Utility Function, Learning Constructs, Three Practitioner Questions, Five Trust Foundations, Spectrum of Harm, Full Value Ledger) is absent or misconfigured."@en
    ] .

:q2 a schema:Question ;
    schema:name "What is an Entity Propensity Model (EPM)?"@en ;
    schema:text "What is an Entity Propensity Model (EPM)?"@en ;
    schema:acceptedAnswer [
        a schema:Answer ;
        schema:text "An EPM is a causal model of individual behavior — not a population average. It answers why this specific person behaves the way they do, and predicts what changes when conditions change. Without EPMs, the agent serves the average. Nobody is average."@en
    ] .

:q3 a schema:Question ;
    schema:name "What is the AI Utility Function?"@en ;
    schema:text "What is the AI Utility Function?"@en ;
    schema:acceptedAnswer [
        a schema:Answer ;
        schema:text "The AI Utility Function is the explicit, multi-dimensional definition of what 'good' means for an autonomous agent, spanning customer, operational, societal, ethical, and environmental value — including second and third-order effects. Without it, agents optimize toward whatever is easiest to measure."@en
    ] .

:q4 a schema:Question ;
    schema:name "What are the Three Practitioner Questions?"@en ;
    schema:text "What are the Three Practitioner Questions that must be answered before deploying an autonomous agent?"@en ;
    schema:acceptedAnswer [
        a schema:Answer ;
        schema:text "The three questions are: (1) What can the agent do without asking? (2) What must it ask before acting? (3) What can it never do, regardless of who asks? All three must be answered before deployment, encoded into the architecture, and enforced by design — not by intention."@en
    ] .

:q5 a schema:Question ;
    schema:name "What are the Five Trust Foundations?"@en ;
    schema:text "What are the Five Trust Foundations required for trustworthy autonomous agents?"@en ;
    schema:acceptedAnswer [
        a schema:Answer ;
        schema:text "The Five Trust Foundations are: Security (enforced authorization boundaries), Privacy (sensitive information restricted by default), Identity (verified knowledge of who is authorized), Accountability (every significant action attributable and logged), and Context (causal understanding of who is affected and how). All five are required; none is sufficient alone."@en
    ] .

:q6 a schema:Question ;
    schema:name "What is the Spectrum of Harm?"@en ;
    schema:text "What is the Spectrum of Harm for autonomous AI agents?"@en ;
    schema:acceptedAnswer [
        a schema:Answer ;
        schema:text "The Spectrum of Harm describes how the same design failures produce Nuisance, Serious, or Catastrophic outcomes depending on the domain and its reversibility. The agent capability does not change — what changes is whether the damage can be undone."@en
    ] .

:q7 a schema:Question ;
    schema:name "What is the Full Value Ledger?"@en ;
    schema:text "What is the Full Value Ledger in autonomous agent economics?"@en ;
    schema:acceptedAnswer [
        a schema:Answer ;
        schema:text "The Full Value Ledger is complete economic accounting across all value dimensions. Agent Unit Economics that counts only compute costs and efficiency gains — while ignoring governance failures and unauthorized actions — is a partial ledger that systematically undercounts risk."@en
    ] .

:q8 a schema:Question ;
    schema:name "Why does Phase 1 matter most?"@en ;
    schema:text "Why is Phase 1 (Before You Define the Agent) the most important deployment phase?"@en ;
    schema:acceptedAnswer [
        a schema:Answer ;
        schema:text "Phase 1 is where most failures originate. Organizations that skip directly to building and deploying agents — before defining what agents should do, who they serve, and their operational boundaries — set the conditions for predictable failure. Phase 1 requires significant time and a diverse set of stakeholders."@en
    ] .

:q9 a schema:Question ;
    schema:name "What is Marginal Propensity to Learn (MPL)?"@en ;
    schema:text "What is Marginal Propensity to Learn (MPL)?"@en ;
    schema:acceptedAnswer [
        a schema:Answer ;
        schema:text "MPL measures the rate of improvement an autonomous agent achieves per interaction. It is a key metric within Learning Constructs. Combined with the Law of Compounding, a 1% consistent improvement translates to a 37.8x performance advantage over one year."@en
    ] .

:q10 a schema:Question ;
    schema:name "What lessons does the Callahan family case teach?"@en ;
    schema:text "What does the Callahan family vacation case study teach about autonomous AI agents?"@en ;
    schema:acceptedAnswer [
        a schema:Answer ;
        schema:text "The Callahan case demonstrates that a successful autonomous agent is not lucky — it succeeds because someone made deliberate decisions before deployment about what the agent could know, could do, had to ask about, and could never do. Those decisions were not constraints on capability; they were the conditions that made the capability trustworthy."@en
    ] .

:q11 a schema:Question ;
    schema:name "Why is reactive governance economically inferior?"@en ;
    schema:text "Why is reactive governance economically inferior to proactive governance?"@en ;
    schema:acceptedAnswer [
        a schema:Answer ;
        schema:text "Organizations that govern reactively pay for every failure individually, with no framework to prevent the next one, and face escalating liability as agents scale into higher-stakes domains. Organizations that build governance from the start make the investment once and inherit compounding advantage across every deployment."@en
    ] .

:q12 a schema:Question ;
    schema:name "How should authorization boundaries be enforced?"@en ;
    schema:text "How should authorization boundaries be enforced in autonomous AI agents?"@en ;
    schema:acceptedAnswer [
        a schema:Answer ;
        schema:text "Authorization boundaries must be enforced at execution time — not merely documented in a policy. The agent must have a verified, persistent model of who is authorized to instruct it that cannot be overridden by tone, urgency, or claimed authority. Stress-testing against social engineering scenarios is required before deployment to real-world systems."@en
    ] .

# ─────────────────────────────────────────────
# Glossary (skos:ConceptScheme + schema:DefinedTermSet)
# ─────────────────────────────────────────────

# Glossary, dual-typed so both SKOS tooling (ConceptScheme / inScheme /
# hasTopConcept) and schema.org consumers (DefinedTermSet / hasDefinedTerm /
# inDefinedTermSet) can navigate it in both directions. Each term carries
# the schema.org membership link alongside the existing skos:inScheme.
:glossarySection a skos:ConceptScheme, schema:DefinedTermSet ;
    schema:name "Glossary: Autonomous AI Agent Action Plan"@en ;
    skos:prefLabel "Autonomous AI Agent Glossary"@en ;
    skos:hasTopConcept :termEPM, :termELM, :termAIUtilityFunction, :termLearningConstructs,
                       :termMPL, :termSpectrumOfHarm, :termFullValueLedger,
                       :termAgentUnitEconomics, :termFiveTrustFoundations, :termThreePractitionerQuestions ;
    schema:hasDefinedTerm :termEPM, :termELM, :termAIUtilityFunction, :termLearningConstructs,
                          :termMPL, :termSpectrumOfHarm, :termFullValueLedger,
                          :termAgentUnitEconomics, :termFiveTrustFoundations, :termThreePractitionerQuestions .

:termEPM a skos:Concept, schema:DefinedTerm ;
    skos:prefLabel "Entity Propensity Model (EPM)"@en ;
    schema:name "Entity Propensity Model (EPM)"@en ;
    schema:description "A causal behavioral model of an individual entity (person or organization), capturing why they behave the way they do and how their behavior changes under different conditions."@en ;
    skos:inScheme :glossarySection ;
    schema:inDefinedTermSet :glossarySection .

:termELM a skos:Concept, schema:DefinedTerm ;
    skos:prefLabel "Entity Language Model (ELM)"@en ;
    schema:name "Entity Language Model (ELM)"@en ;
    schema:description "The contextual communication layer that interprets EPM intelligence and adapts tone, detail, and framing to each individual in each moment. Treats overrides and non-responses as learning signals."@en ;
    skos:inScheme :glossarySection ;
    schema:inDefinedTermSet :glossarySection .

:termAIUtilityFunction a skos:Concept, schema:DefinedTerm ;
    skos:prefLabel "AI Utility Function"@en ;
    schema:name "AI Utility Function"@en ;
    schema:description "An explicit, multi-dimensional definition of what 'good' means for an autonomous agent, spanning customer, operational, societal, ethical, and environmental value dimensions."@en ;
    skos:inScheme :glossarySection ;
    schema:inDefinedTermSet :glossarySection .

:termLearningConstructs a skos:Concept, schema:DefinedTerm ;
    skos:prefLabel "Learning Constructs"@en ;
    schema:name "Learning Constructs"@en ;
    schema:description "Feedback mechanisms that enable autonomous agents to improve with each interaction, measured by Marginal Propensity to Learn and compounded over time."@en ;
    skos:inScheme :glossarySection ;
    schema:inDefinedTermSet :glossarySection .

:termMPL a skos:Concept, schema:DefinedTerm ;
    skos:prefLabel "Marginal Propensity to Learn (MPL)"@en ;
    schema:name "Marginal Propensity to Learn (MPL)"@en ;
    schema:description "A metric measuring the rate of improvement an autonomous agent achieves per interaction. A 1% daily MPL compounds to a 37.8x annual advantage."@en ;
    skos:inScheme :glossarySection ;
    schema:inDefinedTermSet :glossarySection .

:termSpectrumOfHarm a skos:Concept, schema:DefinedTerm ;
    skos:prefLabel "Spectrum of Harm"@en ;
    schema:name "Spectrum of Harm"@en ;
    schema:description "A classification framework mapping autonomous agent failures to Nuisance, Serious, or Catastrophic outcomes based on domain and reversibility of damage."@en ;
    skos:inScheme :glossarySection ;
    schema:inDefinedTermSet :glossarySection .

:termFullValueLedger a skos:Concept, schema:DefinedTerm ;
    skos:prefLabel "Full Value Ledger"@en ;
    schema:name "Full Value Ledger"@en ;
    schema:description "Complete economic accounting for autonomous agents that includes compute costs, maintenance costs, AND governance failure costs — not merely efficiency gains."@en ;
    skos:inScheme :glossarySection ;
    schema:inDefinedTermSet :glossarySection .

:termAgentUnitEconomics a skos:Concept, schema:DefinedTerm ;
    skos:prefLabel "Agent Unit Economics"@en ;
    schema:name "Agent Unit Economics"@en ;
    schema:description "The per-agent financial model covering all cost and value dimensions. Incomplete if it excludes governance failure costs."@en ;
    skos:inScheme :glossarySection ;
    schema:inDefinedTermSet :glossarySection .

:termFiveTrustFoundations a skos:Concept, schema:DefinedTerm ;
    skos:prefLabel "Five Trust Foundations"@en ;
    schema:name "Five Trust Foundations"@en ;
    schema:description "The five pillars — Security, Privacy, Identity, Accountability, Context — that must all be present in any trustworthy autonomous agent architecture."@en ;
    skos:inScheme :glossarySection ;
    schema:inDefinedTermSet :glossarySection .

:termThreePractitionerQuestions a skos:Concept, schema:DefinedTerm ;
    skos:prefLabel "Three Practitioner Questions"@en ;
    schema:name "Three Practitioner Questions"@en ;
    schema:description "The three governance boundary questions (can/must/never do) that must be answered in writing before any architecture decisions are made for an autonomous agent."@en ;
    skos:inScheme :glossarySection ;
    schema:inDefinedTermSet :glossarySection .

# ─────────────────────────────────────────────
# HowTo: Deploying a Trustworthy Autonomous Agent
# ─────────────────────────────────────────────

# Deployment guide as a schema:HowTo: seven ordered steps (schema:position
# 1-7) mapping the framework concepts onto a deployment sequence.
:howtoSection a schema:HowTo ;
    schema:name "How to Deploy a Trustworthy Autonomous AI Agent"@en ;
    schema:description "A step-by-step guide to designing and deploying autonomous AI agents that deliver relevant, accurate, and responsible outcomes."@en ;
    schema:step :step1, :step2, :step3, :step4, :step5, :step6, :step7 .

:step1 a schema:HowToStep ;
    schema:name "Build Entity Propensity Models"@en ;
    schema:position 1 ;
    schema:text "Identify all entities the agent will serve. Build causal behavioral models for each — capturing individual patterns, triggers, and change responses — not population averages."@en .

:step2 a schema:HowToStep ;
    schema:name "Define the AI Utility Function"@en ;
    schema:position 2 ;
    schema:text "Explicitly define what 'good' means across all value dimensions: customer, operational, societal, ethical, and environmental. Make it specific enough to adjudicate real trade-offs."@en .

:step3 a schema:HowToStep ;
    schema:name "Map the Spectrum of Harm"@en ;
    schema:position 3 ;
    schema:text "Determine where failures in your domain land on the Spectrum of Harm (Nuisance, Serious, Catastrophic) and design governance proportionally to the irreversibility of potential damage."@en .

:step4 a schema:HowToStep ;
    schema:name "Answer the Three Practitioner Questions"@en ;
    schema:position 4 ;
    schema:text "Write down, before any architecture decisions: (1) what the agent can do autonomously, (2) what it must confirm with humans, and (3) what it must never do regardless of instructions."@en .

:step5 a schema:HowToStep ;
    schema:name "Embed the Five Trust Foundations"@en ;
    schema:position 5 ;
    schema:text "Build Security, Privacy, Identity, Accountability, and Context into the architecture — not into documentation. Enforce authorization at execution time. Stress-test against social engineering before deployment."@en .

:step6 a schema:HowToStep ;
    schema:name "Activate Learning Constructs"@en ;
    schema:position 6 ;
    schema:text "Instrument the agent to capture every interaction outcome, override, and non-response as a learning signal. Track MPL to confirm the agent is improving, and use compounding as a competitive measurement."@en .

:step7 a schema:HowToStep ;
    schema:name "Measure with the Full Value Ledger"@en ;
    schema:position 7 ;
    schema:text "Before scaling, validate Agent Unit Economics against the Full Value Ledger — include compute costs, maintenance, AND governance failure costs. Never accept a partial ledger as a basis for scaling decisions."@en .