@base <https://www.linkedin.com/pulse/honest-problem-ai-governance-michal-rodzos-krgmc/> .
@prefix schema: <https://schema.org/> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .

<#Article> a schema:Article ;
  schema:headline "The Honest Problem with AI Governance"@en ;
  schema:name "The Honest Problem with AI Governance"@en ;
  schema:alternativeHeadline "Why assurance, not policy theatre, is the real control problem for AI"@en ;
  schema:datePublished "2026-04-27"^^<http://www.w3.org/2001/XMLSchema#date> ;
  schema:inLanguage "en" ;
  schema:url <https://www.linkedin.com/pulse/honest-problem-ai-governance-michal-rodzos-krgmc/> ;
  schema:publisher <#Publisher> ;
  schema:author <https://www.linkedin.com/in/michalrodzos#this> ;
  schema:isPartOf <#Newsletter> ;
  schema:abstract "Michal Rodzos argues that AI governance fails when organisations try to govern high-variety systems with low-variety controls such as static policies, light risk registers, and symbolic assurance practices."@en ;
  schema:articleBody "The article frames AI governance as a control-variety problem. Drawing on Ashby's Law of Requisite Variety, it argues that governance must match the complexity of the system being governed. That becomes difficult with AI because models are opaque, capable in ways their builders did not fully anticipate, trained on data that users cannot adequately inspect, and prone to drift after deployment. The article uses AWS and Azure failure cases to show that even hyperscalers need stronger human checkpoints around AI-assisted change. Its constructive argument is that risk and assurance professionals should treat AI governance as a reasonable-assurance discipline: test outputs, monitor continuously, design escalation paths, and build defensibility through evidence rather than pretending that full explainability or complete prevention already exists."@en ;
  schema:text "Most organisations are trying to govern AI systems with a risk register, a handful of policies, and good intentions. It's not enough."@en ;
  schema:hasPart <#Section-Intro>, <#Section-Hyperscalers>, <#Section-Hardness>, <#Section-Gap>, <#Section-Assurance>, <#Section-Trust>, <#Section-Transparency>, <#DefinedTerms>, <#FAQ>, <#HowTo-ReasonableAssurance>, <#Discussion> ;
  schema:about <#ashbys-law>, <#governance-theatre>, <#trust-deficit>, <#aws-ai-outage>, <#azure-front-door-outage>, <#human-checkpoint>, <#model-opacity>, <#emergent-capability>, <#training-data-opacity>, <#model-drift>, <#intelligent-government-report>, <#adoption-without-assurance>, <#requisite-variety-gap>, <#reasonable-assurance>, <#output-testing>, <#continuous-monitoring>, <#vendor-opacity>, <#public-trust>, <#defensibility>, <#contestability>, <#ai-assisted-drafting>, <#human-supervision>, <#fabricated-reference-risk> ;
  schema:mentions <#DefinedTerms>, <#FAQ>, <#HowTo-ReasonableAssurance>, <#Discussion>, <#Newsletter>, <#Step1>, <#Step2>, <#Step3>, <#Step4>, <#Step5>, <#Q1>, <#Q2>, <#Q3>, <#Q4>, <#Q5>, <#Q6>, <#Q7>, <#Q8>, <#Q9>, <#Q10>, <#Q1-answer>, <#Q2-answer>, <#Q3-answer>, <#Q4-answer>, <#Q5-answer>, <#Q6-answer>, <#Q7-answer>, <#Q8-answer>, <#Q9-answer>, <#Q10-answer>, <#Ref-Ashby>, <#Ref-TrustStudy>, <#Ref-KPMGGov>, <#Ref-AmazonRebuttal>, <#Ref-CNBCAWS>, <#Ref-AzureReview>, <#Ref-MIT>, <#Ref-TechCrunchO3>, <#Comment-PhilipPinol>, <#Comment-PaulRomanOnPhilip>, <#Comment-NicolasFigay>, <#Comment-MichalReplyToNicolas>, <#Comment-YasirAbbas>, <#Comment-PaulRomanOnYasir>, <#Comment-PaulRoman>, <#Comment-SecurityAtlasAI>, <#Comment-KasimK>, <#Comment-LeonTsvasman>, <#Comment-AlbertoSurina>, <#Comment-BartoszPiwcewicz>, <#VisibleReactionCount>, <#VisibleCommentCount>, <https://www.linkedin.com/in/michalrodzos#this>, <#Publisher> .

<#Publisher> a schema:Organization ;
  schema:name "LinkedIn Pulse"@en ;
  schema:url <https://www.linkedin.com/> .

<#Newsletter> a schema:CreativeWorkSeries ;
  schema:name "Governing What Matters"@en ;
  schema:url <https://www.linkedin.com/newsletters/governing-what-matters-7452170966060711936/> ;
  schema:text "113 subscribers"@en .

<https://www.linkedin.com/in/michalrodzos#this> a schema:Person ;
  schema:name "Michal Rodzos"@en ;
  schema:url <https://www.linkedin.com/in/michalrodzos/> ;
  schema:jobTitle "Director, Actuarial & Advanced Analytics | KPMG Australia | Government & Defence | AI, Data Science & Risk Modelling"@en .

<#VisibleReactionCount> a schema:InteractionCounter ;
  schema:interactionType schema:LikeAction ;
  schema:userInteractionCount 19 .

<#VisibleCommentCount> a schema:InteractionCounter ;
  schema:interactionType schema:CommentAction ;
  schema:userInteractionCount 18 .

<#DefinedTerms> a schema:DefinedTermSet ;
  schema:name "Defined terms for The Honest Problem with AI Governance"@en ;
  schema:hasPart <#ashbys-law>, <#governance-theatre>, <#trust-deficit>, <#aws-ai-outage>, <#azure-front-door-outage>, <#human-checkpoint>, <#model-opacity>, <#emergent-capability>, <#training-data-opacity>, <#model-drift>, <#intelligent-government-report>, <#adoption-without-assurance>, <#requisite-variety-gap>, <#reasonable-assurance>, <#output-testing>, <#continuous-monitoring>, <#vendor-opacity>, <#public-trust>, <#defensibility>, <#contestability>, <#ai-assisted-drafting>, <#human-supervision>, <#fabricated-reference-risk> ;
  schema:isPartOf <#Article> .

<#FAQ> a schema:FAQPage ;
  schema:name "FAQ for The Honest Problem with AI Governance"@en ;
  schema:mainEntity <#Q1>, <#Q2>, <#Q3>, <#Q4>, <#Q5>, <#Q6>, <#Q7>, <#Q8>, <#Q9>, <#Q10> ;
  schema:hasPart <#Q1>, <#Q2>, <#Q3>, <#Q4>, <#Q5>, <#Q6>, <#Q7>, <#Q8>, <#Q9>, <#Q10>, <#Q1-answer>, <#Q2-answer>, <#Q3-answer>, <#Q4-answer>, <#Q5-answer>, <#Q6-answer>, <#Q7-answer>, <#Q8-answer>, <#Q9-answer>, <#Q10-answer> ;
  schema:isPartOf <#Article> .

<#Discussion> a schema:Comment ;
  schema:name "Visible LinkedIn discussion layer"@en ;
  schema:text "Visible comment thread extracted from the article page, including top-level comments and visible replies shown in the logged-in sidebar."@en ;
  schema:hasPart <#Comment-PhilipPinol>, <#Comment-PaulRomanOnPhilip>, <#Comment-NicolasFigay>, <#Comment-MichalReplyToNicolas>, <#Comment-YasirAbbas>, <#Comment-PaulRomanOnYasir>, <#Comment-PaulRoman>, <#Comment-SecurityAtlasAI>, <#Comment-KasimK>, <#Comment-LeonTsvasman>, <#Comment-AlbertoSurina>, <#Comment-BartoszPiwcewicz> ;
  schema:isPartOf <#Article> .

<#ashbys-law> a schema:DefinedTerm ;
  schema:name "Ashby's Law of Requisite Variety"@en ;
  schema:description "The cybernetic principle that a control system must match the complexity of what it governs."@en ;
  schema:isPartOf <#DefinedTerms>, <#Article> .

<#governance-theatre> a schema:DefinedTerm ;
  schema:name "Governance theatre"@en ;
  schema:description "Impressive-sounding governance activity that does not materially control or assure AI behavior."@en ;
  schema:isPartOf <#DefinedTerms>, <#Article> .

<#trust-deficit> a schema:DefinedTerm ;
  schema:name "AI trust deficit"@en ;
  schema:description "The gap between increasing AI adoption and declining public confidence in AI systems."@en ;
  schema:isPartOf <#DefinedTerms>, <#Article> .

<#aws-ai-outage> a schema:DefinedTerm ;
  schema:name "AWS AI-assisted outage case"@en ;
  schema:description "The cited late-2025 AWS incident used to illustrate the need for human review around AI-assisted infrastructure change."@en ;
  schema:isPartOf <#DefinedTerms>, <#Article> .

<#azure-front-door-outage> a schema:DefinedTerm ;
  schema:name "Azure Front Door outage case"@en ;
  schema:description "The cited Azure configuration incident used to show how one change can cascade across complex systems."@en ;
  schema:isPartOf <#DefinedTerms>, <#Article> .

<#human-checkpoint> a schema:DefinedTerm ;
  schema:name "Human checkpoint"@en ;
  schema:description "A mandatory human review or approval step added before high-consequence AI-assisted changes become real."@en ;
  schema:isPartOf <#DefinedTerms>, <#Article> .

<#model-opacity> a schema:DefinedTerm ;
  schema:name "Model opacity"@en ;
  schema:description "The difficulty of seeing inside a model well enough to understand its reasoning rather than just its outputs."@en ;
  schema:isPartOf <#DefinedTerms>, <#Article> .

<#emergent-capability> a schema:DefinedTerm ;
  schema:name "Emergent capability"@en ;
  schema:description "A model behavior or competence that appears beyond what designers expected from the training objective."@en ;
  schema:isPartOf <#DefinedTerms>, <#Article> .

<#training-data-opacity> a schema:DefinedTerm ;
  schema:name "Training-data opacity"@en ;
  schema:description "The inability to fully inspect or audit the data and design decisions that shaped a model's learned weights."@en ;
  schema:isPartOf <#DefinedTerms>, <#Article> .

<#model-drift> a schema:DefinedTerm ;
  schema:name "Model drift"@en ;
  schema:description "Performance change over time as the environment diverges from the model's training conditions."@en ;
  schema:isPartOf <#DefinedTerms>, <#Article> .

<#intelligent-government-report> a schema:DefinedTerm ;
  schema:name "KPMG Intelligent Government report"@en ;
  schema:description "A 2025 leadership study used to quantify the mismatch between AI ambition and governance readiness."@en ;
  schema:isPartOf <#DefinedTerms>, <#Article> .

<#adoption-without-assurance> a schema:DefinedTerm ;
  schema:name "Adoption without assurance"@en ;
  schema:description "The state in which organisations scale AI faster than they scale evidence-backed governance and control."@en ;
  schema:isPartOf <#DefinedTerms>, <#Article> .

<#requisite-variety-gap> a schema:DefinedTerm ;
  schema:name "Requisite variety gap"@en ;
  schema:description "The mismatch between system complexity and the complexity of governance controls applied to it."@en ;
  schema:isPartOf <#DefinedTerms>, <#Article> .

<#reasonable-assurance> a schema:DefinedTerm ;
  schema:name "Reasonable assurance"@en ;
  schema:description "An evidence-based control standard that seeks sufficient confidence rather than impossible certainty."@en ;
  schema:isPartOf <#DefinedTerms>, <#Article> .

<#output-testing> a schema:DefinedTerm ;
  schema:name "Output testing"@en ;
  schema:description "Testing model behavior directly for quality, bias, or boundary adherence instead of relying on full internal explainability."@en ;
  schema:isPartOf <#DefinedTerms>, <#Article> .

<#continuous-monitoring> a schema:DefinedTerm ;
  schema:name "Continuous monitoring"@en ;
  schema:description "Ongoing observation of system behavior after deployment to detect drift, failures, or unexpected behavior."@en ;
  schema:isPartOf <#DefinedTerms>, <#Article> .

<#vendor-opacity> a schema:DefinedTerm ;
  schema:name "Vendor opacity"@en ;
  schema:description "The governance problem created when commercial AI providers do not expose enough internal detail for direct audit."@en ;
  schema:isPartOf <#DefinedTerms>, <#Article> .

<#public-trust> a schema:DefinedTerm ;
  schema:name "Public trust"@en ;
  schema:description "The willingness of people to rely on AI systems when meaningful assurance mechanisms are present."@en ;
  schema:isPartOf <#DefinedTerms>, <#Article> .

<#defensibility> a schema:DefinedTerm ;
  schema:name "Defensibility"@en ;
  schema:description "The ability of governance arrangements to withstand external scrutiny with evidence and rationale."@en ;
  schema:isPartOf <#DefinedTerms>, <#Article> .

<#contestability> a schema:DefinedTerm ;
  schema:name "Contestability"@en ;
  schema:description "The condition in which an affected person can challenge an AI-assisted decision and receive a meaningful answer."@en ;
  schema:isPartOf <#DefinedTerms>, <#Article> .

<#ai-assisted-drafting> a schema:DefinedTerm ;
  schema:name "AI-assisted drafting"@en ;
  schema:description "Using AI as a drafting and research aid under human supervision rather than as an autonomous editorial authority."@en ;
  schema:isPartOf <#DefinedTerms>, <#Article> .

<#human-supervision> a schema:DefinedTerm ;
  schema:name "Human supervision"@en ;
  schema:description "Human responsibility for source verification, judgment, and editorial control over AI-assisted outputs."@en ;
  schema:isPartOf <#DefinedTerms>, <#Article> .

<#fabricated-reference-risk> a schema:DefinedTerm ;
  schema:name "Fabricated reference risk"@en ;
  schema:description "The failure mode in which an AI confidently invents a plausible but false citation or source."@en ;
  schema:isPartOf <#DefinedTerms>, <#Article> .

<#Section-Intro> a schema:WebPageElement ;
  schema:name "Why current AI governance fails"@en ;
  schema:position 1 ;
  schema:about <#ashbys-law>, <#governance-theatre>, <#trust-deficit> ;
  schema:text "The opening claim is that most governance programs underfit AI complexity by relying on static documents and symbolic controls."@en ;
  schema:isPartOf <#Article> .

<#Section-Hyperscalers> a schema:WebPageElement ;
  schema:name "What the hyperscalers learnt the hard way"@en ;
  schema:position 2 ;
  schema:about <#aws-ai-outage>, <#azure-front-door-outage>, <#human-checkpoint> ;
  schema:text "AWS and Azure incidents are used to show that even advanced operators need stronger review and containment around AI-assisted change."@en ;
  schema:isPartOf <#Article> .

<#Section-Hardness> a schema:WebPageElement ;
  schema:name "Four things that make AI genuinely hard to govern"@en ;
  schema:position 3 ;
  schema:about <#model-opacity>, <#emergent-capability>, <#training-data-opacity>, <#model-drift> ;
  schema:text "The article identifies four structural reasons AI governance is harder than ordinary software governance."@en ;
  schema:isPartOf <#Article> .

<#Section-Gap> a schema:WebPageElement ;
  schema:name "The governance gap in numbers"@en ;
  schema:position 4 ;
  schema:about <#intelligent-government-report>, <#adoption-without-assurance>, <#requisite-variety-gap> ;
  schema:text "Survey results and leadership data show that AI ambition is outrunning governance maturity."@en ;
  schema:isPartOf <#Article> .

<#Section-Assurance> a schema:WebPageElement ;
  schema:name "What can actually be done"@en ;
  schema:position 5 ;
  schema:about <#reasonable-assurance>, <#output-testing>, <#continuous-monitoring>, <#vendor-opacity> ;
  schema:text "The article reframes governance as a reasonable-assurance practice based on evidence, monitoring, and controlled response."@en ;
  schema:isPartOf <#Article> .

<#Section-Trust> a schema:WebPageElement ;
  schema:name "Trust, defensibility, and contestability"@en ;
  schema:position 6 ;
  schema:about <#public-trust>, <#defensibility>, <#contestability> ;
  schema:text "The closing direction moves from internal assurance toward systems that can withstand scrutiny and meaningful challenge."@en ;
  schema:isPartOf <#Article> .

<#Section-Transparency> a schema:WebPageElement ;
  schema:name "Transparency note"@en ;
  schema:position 7 ;
  schema:about <#ai-assisted-drafting>, <#human-supervision>, <#fabricated-reference-risk> ;
  schema:text "The article closes with a live example of AI-assisted drafting failure and argues that governance value sits in competent human checking."@en ;
  schema:isPartOf <#Article> .

<#HowTo-ReasonableAssurance> a schema:HowTo ;
  schema:name "How to govern AI with reasonable assurance rather than governance theatre"@en ;
  schema:description "The article turns AI governance into a practical assurance workflow rooted in evidence, monitoring, and defensible human oversight."@en ;
  schema:isPartOf <#Article> ;
  schema:hasPart <#Step1>, <#Step2>, <#Step3>, <#Step4>, <#Step5> ;
  schema:step <#Step1>, <#Step2>, <#Step3>, <#Step4>, <#Step5> .

<#Step1> a schema:HowToStep ;
  schema:name "Match governance variety to system variety"@en ;
  schema:position 1 ;
  schema:text "Start by treating AI governance as a complexity problem, not a documentation problem, and design controls proportionate to the system's risk and unpredictability."@en ;
  schema:isPartOf <#HowTo-ReasonableAssurance>, <#Article> .

<#Step2> a schema:HowToStep ;
  schema:name "Assume opacity and capability surprise"@en ;
  schema:position 2 ;
  schema:text "Build governance on the assumption that models are not fully inspectable and may display capabilities or failure modes that were not anticipated."@en ;
  schema:isPartOf <#HowTo-ReasonableAssurance>, <#Article> .

<#Step3> a schema:HowToStep ;
  schema:name "Use evidence-based output controls"@en ;
  schema:position 3 ;
  schema:text "Rely on output testing, behavioral benchmarks, bias assessment, and monitored boundaries rather than pretending full internal explainability already exists."@en ;
  schema:isPartOf <#HowTo-ReasonableAssurance>, <#Article> .

<#Step4> a schema:HowToStep ;
  schema:name "Design continuous monitoring and escalation"@en ;
  schema:position 4 ;
  schema:text "Treat governance as a live operational discipline with detection, response, and human checkpoints for high-consequence change or action."@en ;
  schema:isPartOf <#HowTo-ReasonableAssurance>, <#Article> .

<#Step5> a schema:HowToStep ;
  schema:name "Make trust defensible and contestable"@en ;
  schema:position 5 ;
  schema:text "Aim for governance that can survive scrutiny, support meaningful challenge, and show how decisions were supervised and evidenced."@en ;
  schema:isPartOf <#HowTo-ReasonableAssurance>, <#Article> .

<#Q1> a schema:Question ;
  schema:name "What is the article's core governance claim?"@en ;
  schema:acceptedAnswer <#Q1-answer> ;
  schema:isPartOf <#FAQ>, <#Article> .
<#Q1-answer> a schema:Answer ;
  schema:text "AI governance fails when organisations try to govern highly complex systems with controls that are too simple, static, or symbolic."@en ;
  schema:isPartOf <#Q1>, <#FAQ>, <#Article> .

<#Q2> a schema:Question ;
  schema:name "Why does the article invoke Ashby's Law?"@en ;
  schema:acceptedAnswer <#Q2-answer> ;
  schema:isPartOf <#FAQ>, <#Article> .
<#Q2-answer> a schema:Answer ;
  schema:text "Ashby's Law gives the article its central control principle: governance must match the complexity and risk variety of the system being governed."@en ;
  schema:isPartOf <#Q2>, <#FAQ>, <#Article> .

<#Q3> a schema:Question ;
  schema:name "What is governance theatre in this context?"@en ;
  schema:acceptedAnswer <#Q3-answer> ;
  schema:isPartOf <#FAQ>, <#Article> .
<#Q3-answer> a schema:Answer ;
  schema:text "It is the appearance of governance through policies, registers, and vendor promises that do not actually provide adequate control or assurance."@en ;
  schema:isPartOf <#Q3>, <#FAQ>, <#Article> .

<#Q4> a schema:Question ;
  schema:name "Why are hyperscaler incidents important to the argument?"@en ;
  schema:acceptedAnswer <#Q4-answer> ;
  schema:isPartOf <#FAQ>, <#Article> .
<#Q4-answer> a schema:Answer ;
  schema:text "They show that even organisations closest to the technology still need stronger review, containment, and human checkpoints around AI-assisted change."@en ;
  schema:isPartOf <#Q4>, <#FAQ>, <#Article> .

<#Q5> a schema:Question ;
  schema:name "What makes AI harder to govern than ordinary software?"@en ;
  schema:acceptedAnswer <#Q5-answer> ;
  schema:isPartOf <#FAQ>, <#Article> .
<#Q5-answer> a schema:Answer ;
  schema:text "The article highlights opacity, emergent capability, training-data opacity, and post-deployment drift as core governance obstacles."@en ;
  schema:isPartOf <#Q5>, <#FAQ>, <#Article> .

<#Q6> a schema:Question ;
  schema:name "What does the article mean by reasonable assurance?"@en ;
  schema:acceptedAnswer <#Q6-answer> ;
  schema:isPartOf <#FAQ>, <#Article> .
<#Q6-answer> a schema:Answer ;
  schema:text "It means seeking sufficient evidence-backed confidence for decision-making rather than pretending governance can deliver absolute certainty."@en ;
  schema:isPartOf <#Q6>, <#FAQ>, <#Article> .

<#Q7> a schema:Question ;
  schema:name "What controls does the article treat as real and useful?"@en ;
  schema:acceptedAnswer <#Q7-answer> ;
  schema:isPartOf <#FAQ>, <#Article> .
<#Q7-answer> a schema:Answer ;
  schema:text "It points to output testing, bias audits, behavioral benchmarks, continuous monitoring, escalation paths, and procurement-based transparency requirements."@en ;
  schema:isPartOf <#Q7>, <#FAQ>, <#Article> .

<#Q8> a schema:Question ;
  schema:name "How does the article treat vendor opacity?"@en ;
  schema:acceptedAnswer <#Q8-answer> ;
  schema:isPartOf <#FAQ>, <#Article> .
<#Q8-answer> a schema:Answer ;
  schema:text "As a real problem that cannot be eliminated completely, but can be managed through testing, governance controls, and contract design."@en ;
  schema:isPartOf <#Q8>, <#FAQ>, <#Article> .

<#Q9> a schema:Question ;
  schema:name "Why is trust discussed alongside defensibility and contestability?"@en ;
  schema:acceptedAnswer <#Q9-answer> ;
  schema:isPartOf <#FAQ>, <#Article> .
<#Q9-answer> a schema:Answer ;
  schema:text "Because governance must not only operate internally but also justify itself externally and support meaningful challenge from affected people."@en ;
  schema:isPartOf <#Q9>, <#FAQ>, <#Article> .

<#Q10> a schema:Question ;
  schema:name "What does the transparency note demonstrate?"@en ;
  schema:acceptedAnswer <#Q10-answer> ;
  schema:isPartOf <#FAQ>, <#Article> .
<#Q10-answer> a schema:Answer ;
  schema:text "It demonstrates that AI can produce plausible fabricated references, reinforcing the article's claim that governance value depends on competent human supervision."@en ;
  schema:isPartOf <#Q10>, <#FAQ>, <#Article> .

<https://www.linkedin.com/in/philiprpinol#this> a schema:Person ;
  schema:name "Philip Pinol"@en ;
  schema:url <https://www.linkedin.com/in/philiprpinol> ;
  schema:description "Founder, ThePraesidium.ai | AI Execution Control OS | Execution Control Infrastructure for Autonomous AI | Governance Runtime & Execution Authorization | Patent Pending"@en .

<https://www.linkedin.com/in/paul-roman-0b9753392#this> a schema:Person ;
  schema:name "Paul Roman"@en ;
  schema:url <https://www.linkedin.com/in/paul-roman-0b9753392> ;
  schema:description "Director | Enterprise Infrastructure & Delivery | AI Governance in Practice | Systems Thinking for Scalable, Accountable AI"@en .

<https://www.linkedin.com/in/nfigay#this> a schema:Person ;
  schema:name "Nicolas Figay"@en ;
  schema:url <https://www.linkedin.com/in/nfigay> ;
  schema:description "Inhabiting Babel | Semantic Cartography for Industrial Interoperability | Making Models Work Together | EA · MBSE · PLM | Semantic Compass"@en .

<https://www.linkedin.com/in/michalrodzos#this> a schema:Person ;
  schema:name "Michal Rodzos"@en ;
  schema:url <https://www.linkedin.com/in/michalrodzos> ;
  schema:description "Director, Actuarial & Advanced Analytics | KPMG Australia | Government & Defence | AI, Data Science & Risk Modelling"@en .

<https://www.linkedin.com/in/yasirabbasai#this> a schema:Person ;
  schema:name "Yasir Abbas"@en ;
  schema:url <https://www.linkedin.com/in/yasirabbasai> ;
  schema:description "Founder & CEO @WhyCrew | Hire Fully Managed Tech Talent at Half the Cost"@en .

<https://www.linkedin.com/company/security-atlas-ai#this> a schema:Organization ;
  schema:name "Security Atlas AI"@en ;
  schema:url <https://www.linkedin.com/company/security-atlas-ai/> ;
  schema:description "374 followers"@en .

<https://www.linkedin.com/in/kasim-khan-abacus#this> a schema:Person ;
  schema:name "Kasim K."@en ;
  schema:url <https://www.linkedin.com/in/kasim-khan-abacus> ;
  schema:description "Head of Global Marketing & Branding, Abacus."@en .

<https://www.linkedin.com/in/tsvasman#this> a schema:Person ;
  schema:name "Dr. Leon TSVASMAN"@en ;
  schema:url <https://www.linkedin.com/in/tsvasman> ;
  schema:description "Polymath on a Mission | Nth-Order Cybernetics→ Strategic Autonomy | Philosophy of Sapiognosis: Infosomatics - Sapiopoiesis - Sapiocracy | Epistemic Integrity→ Civilization Design | Future Council • FCybS • Board Advisor"@en .

<https://www.linkedin.com/in/albertosurina#this> a schema:Person ;
  schema:name "Alberto Surina"@en ;
  schema:url <https://www.linkedin.com/in/albertosurina> ;
  schema:description "Emerging Tech Investor & Professional Violinist"@en .

<https://www.linkedin.com/in/bartosz-piwcewicz#this> a schema:Person ;
  schema:name "Bartosz Piwcewicz"@en ;
  schema:url <https://www.linkedin.com/in/bartosz-piwcewicz> ;
  schema:description "Advanced Analytics & Digital Transformation | Government + Private Sector Innovation"@en .

<#Comment-PhilipPinol> a schema:Comment ;
  schema:author <https://www.linkedin.com/in/philiprpinol#this> ;
  schema:url <https://www.linkedin.com/pulse/honest-problem-ai-governance-michal-rodzos-krgmc/> ;
  schema:dateCreated "17h" ;
  schema:quotation "observability and assurance"@en ;
  schema:text "Philip Pinol agrees with the distinction between observability and assurance, then extends the argument by saying output-stage auditing breaks down once systems can commit actions. He reframes governance around decision-time admissibility and the commit boundary."@en ;
  schema:mentions <https://www.linkedin.com/in/paul-roman-0b9753392#this> ;
  schema:isPartOf <#Discussion>, <#Article> .

<#Comment-PaulRomanOnPhilip> a schema:Comment ;
  schema:author <https://www.linkedin.com/in/paul-roman-0b9753392#this> ;
  schema:url <https://www.linkedin.com/pulse/honest-problem-ai-governance-michal-rodzos-krgmc/> ;
  schema:dateCreated "7h" ;
  schema:quotation "governance moves from monitoring to control"@en ;
  schema:text "Paul Roman replies that once systems commit actions, governance must define what is allowed before the fact. He emphasizes structure, ownership, and accountability at the decision boundary."@en ;
  schema:parentItem <#Comment-PhilipPinol> ;
  schema:mentions <https://www.linkedin.com/in/philiprpinol#this> ;
  schema:isPartOf <#Discussion>, <#Article> .

<#Comment-NicolasFigay> a schema:Comment ;
  schema:author <https://www.linkedin.com/in/nfigay#this> ;
  schema:url <https://www.linkedin.com/pulse/honest-problem-ai-governance-michal-rodzos-krgmc/> ;
  schema:dateCreated "15h" ;
  schema:quotation "healthy caution toward stochastic systems"@en ;
  schema:text "Nicolas Figay says the article formalizes long-standing concerns from applied statistics and interoperability work. He stresses traceability, architecture, verification and validation, and continuous monitoring, then asks how those disciplines can be applied rigorously to AI."@en ;
  schema:mentions <#model-drift>, <#continuous-monitoring> ;
  schema:isPartOf <#Discussion>, <#Article> .

<#Comment-MichalReplyToNicolas> a schema:Comment ;
  schema:author <https://www.linkedin.com/in/michalrodzos#this> ;
  schema:url <https://www.linkedin.com/pulse/honest-problem-ai-governance-michal-rodzos-krgmc/> ;
  schema:dateCreated "13h" ;
  schema:quotation "the same principles apply"@en ;
  schema:text "Michal Rodzos replies that traditional governance principles still apply, but the implementation must account for drifting, partially inspectable systems that do not remain stable between audits."@en ;
  schema:parentItem <#Comment-NicolasFigay> ;
  schema:mentions <https://www.linkedin.com/in/nfigay#this> ;
  schema:isPartOf <#Discussion>, <#Article> .

<#Comment-YasirAbbas> a schema:Comment ;
  schema:author <https://www.linkedin.com/in/yasirabbasai#this> ;
  schema:url <https://www.linkedin.com/pulse/honest-problem-ai-governance-michal-rodzos-krgmc/> ;
  schema:dateCreated "13h" ;
  schema:quotation "static controls to continuous assurance"@en ;
  schema:text "Yasir Abbas compresses the article into a shift from static controls toward continuous assurance, emphasizing evidence over explainability and defensibility over certainty."@en ;
  schema:isPartOf <#Discussion>, <#Article> .

<#Comment-PaulRomanOnYasir> a schema:Comment ;
  schema:author <https://www.linkedin.com/in/paul-roman-0b9753392#this> ;
  schema:url <https://www.linkedin.com/pulse/honest-problem-ai-governance-michal-rodzos-krgmc/> ;
  schema:dateCreated "7h" ;
  schema:quotation "defensibility starts at the decision boundary"@en ;
  schema:text "Paul Roman replies that monitoring provides evidence, but governance becomes real only when boundaries are defined before action is committed."@en ;
  schema:parentItem <#Comment-YasirAbbas> ;
  schema:mentions <https://www.linkedin.com/in/yasirabbasai#this> ;
  schema:isPartOf <#Discussion>, <#Article> .

<#Comment-PaulRoman> a schema:Comment ;
  schema:author <https://www.linkedin.com/in/paul-roman-0b9753392#this> ;
  schema:url <https://www.linkedin.com/pulse/honest-problem-ai-governance-michal-rodzos-krgmc/> ;
  schema:dateCreated "7h" ;
  schema:quotation "Governability only scales when it is intentionally designed"@en ;
  schema:text "Paul Roman argues that AI governance must become operating structure rather than policy, with validation, monitoring, escalation paths, and explicit decision ownership."@en ;
  schema:isPartOf <#Discussion>, <#Article> .

<#Comment-SecurityAtlasAI> a schema:Comment ;
  schema:author <https://www.linkedin.com/company/security-atlas-ai#this> ;
  schema:url <https://www.linkedin.com/pulse/honest-problem-ai-governance-michal-rodzos-krgmc/> ;
  schema:dateCreated "7h" ;
  schema:quotation "regulatory defensibility today"@en ;
  schema:text "Security Atlas AI asks whether current governance frameworks are optimized more for regulatory defensibility than for public trust and contestability."@en ;
  schema:mentions <#defensibility>, <#contestability>, <#public-trust> ;
  schema:isPartOf <#Discussion>, <#Article> .

<#Comment-KasimK> a schema:Comment ;
  schema:author <https://www.linkedin.com/in/kasim-khan-abacus#this> ;
  schema:url <https://www.linkedin.com/pulse/honest-problem-ai-governance-michal-rodzos-krgmc/> ;
  schema:dateCreated "10h" ;
  schema:quotation "who authorised this action"@en ;
  schema:text "Kasim K. says real assurance must answer at runtime who authorized an action, what evidence supported it, and what trail exists for review."@en ;
  schema:isPartOf <#Discussion>, <#Article> .

<#Comment-LeonTsvasman> a schema:Comment ;
  schema:author <https://www.linkedin.com/in/tsvasman#this> ;
  schema:url <https://www.linkedin.com/pulse/honest-problem-ai-governance-michal-rodzos-krgmc/> ;
  schema:dateCreated "13h" ;
  schema:quotation "decisive framing has already migrated into systems"@en ;
  schema:text "Dr. Leon TSVASMAN argues that governance language often arrives after decisive system framing has already been embedded upstream, then links to his own essay on AI's missing layer."@en ;
  schema:citation <#Comment-LeonTsvasman-citation> ;
  schema:isPartOf <#Discussion>, <#Article> .

<#Comment-LeonTsvasman-citation> a schema:CreativeWork ;
  schema:url <https://leontsvasmansapiognosis.substack.com/p/ais-missing-layer> ;
  schema:name "https://leontsvasmansapiognosis.substack.com/p/ais-missing-layer"@en .

<#Comment-AlbertoSurina> a schema:Comment ;
  schema:author <https://www.linkedin.com/in/albertosurina#this> ;
  schema:url <https://www.linkedin.com/pulse/honest-problem-ai-governance-michal-rodzos-krgmc/> ;
  schema:dateCreated "5h" ;
  schema:quotation "Auditors, regulators, citizens"@en ;
  schema:text "Alberto Surina reacts to the article's triangulation of auditors, regulators, and citizens as the key governance audiences."@en ;
  schema:isPartOf <#Discussion>, <#Article> .

<#Comment-BartoszPiwcewicz> a schema:Comment ;
  schema:author <https://www.linkedin.com/in/bartosz-piwcewicz#this> ;
  schema:url <https://www.linkedin.com/pulse/honest-problem-ai-governance-michal-rodzos-krgmc/> ;
  schema:dateCreated "11h" ;
  schema:quotation "really insightful"@en ;
  schema:text "Bartosz Piwcewicz thanks Michal Rodzos for the article and signals interest in the rest of the series."@en ;
  schema:mentions <https://www.linkedin.com/in/michalrodzos#this> ;
  schema:isPartOf <#Discussion>, <#Article> .

<#Ref-Ashby> a schema:CreativeWork ;
  schema:name "Ashby, W.R. (1956) An Introduction to Cybernetics"@en ;
  schema:url <https://archive.org/details/introductiontocy00ashb> ;
  schema:isPartOf <#Article> .

<#Ref-TrustStudy> a schema:CreativeWork ;
  schema:name "Trust, attitudes and use of artificial intelligence: A global study 2025"@en ;
  schema:url <https://figshare.unimelb.edu.au/articles/report/Trust_attitudes_and_use_of_artificial_intelligence_A_global_study_2025/28822919> ;
  schema:isPartOf <#Article> .

<#Ref-KPMGGov> a schema:CreativeWork ;
  schema:name "KPMG Intelligent Government report 2025"@en ;
  schema:url <https://assets.kpmg.com/content/dam/kpmgsites/sg/pdf/2025/10/intelligent-government-report-2025.pdf> ;
  schema:isPartOf <#Article> .

<#Ref-AmazonRebuttal> a schema:CreativeWork ;
  schema:name "Amazon outage rebuttal"@en ;
  schema:url <https://www.aboutamazon.com/news/company-news/amazon-outage-ai-financial-times-correction> ;
  schema:isPartOf <#Article> .

<#Ref-CNBCAWS> a schema:CreativeWork ;
  schema:name "CNBC reporting on Amazon AI-related outages"@en ;
  schema:url <https://www.cnbc.com/2026/03/10/amazon-plans-deep-dive-internal-meeting-address-ai-related-outages.html> ;
  schema:isPartOf <#Article> .

<#Ref-AzureReview> a schema:CreativeWork ;
  schema:name "Azure Front Door post-incident review"@en ;
  schema:url <https://techcommunity.microsoft.com/blog/azurenetworkingblog/azure-front-door-implementing-lessons-learned-following-october-outages/4479416> ;
  schema:isPartOf <#Article> .

<#Ref-MIT> a schema:CreativeWork ;
  schema:name "MIT research on AI overconfidence"@en ;
  schema:url <https://news.mit.edu/2026/better-method-identifying-overconfident-large-language-models-0319> ;
  schema:isPartOf <#Article> .

<#Ref-TechCrunchO3> a schema:CreativeWork ;
  schema:name "TechCrunch on OpenAI o3 hallucination benchmarks"@en ;
  schema:url <https://techcrunch.com/2025/04/18/openais-new-reasoning-ai-models-hallucinate-more/> ;
  schema:isPartOf <#Article> .
