{
  "@context": {
    "@vocab": "http://schema.org/",
    "hasPart": {
      "@id": "http://schema.org/hasPart",
      "@type": "@id"
    },
    "about": {
      "@id": "http://schema.org/about",
      "@type": "@id"
    },
    "mainEntity": {
      "@id": "http://schema.org/mainEntity",
      "@type": "@id"
    },
    "position": "http://schema.org/position",
    "Question": "http://schema.org/Question",
    "Answer": "http://schema.org/Answer",
    "DefinedTerm": "http://schema.org/DefinedTerm",
    "DefinedTermSet": "http://schema.org/DefinedTermSet",
    "HowTo": "http://schema.org/HowTo",
    "HowToStep": "http://schema.org/HowToStep",
    "author": "http://schema.org/author",
    "publisher": "http://schema.org/publisher",
    "image": "http://schema.org/image"
  },
  "@type": "Article",
  "headline": "Seemingly Conscious AI Risks",
  "author": [
    {
      "@type": "Person",
      "name": "Ben Bariach",
      "affiliation": {
        "@type": "Organization",
        "name": "Microsoft AI"
      }
    },
    {
      "@type": "Person",
      "name": "Philipp Schoenegger",
      "affiliation": {
        "@type": "Organization",
        "name": "Microsoft AI"
      }
    },
    {
      "@type": "Person",
      "name": "Michael Bhaskar",
      "affiliation": {
        "@type": "Organization",
        "name": "Microsoft AI"
      }
    },
    {
      "@type": "Person",
      "name": "Mustafa Suleyman",
      "affiliation": {
        "@type": "Organization",
        "name": "Microsoft AI"
      }
    }
  ],
  "publisher": {
    "@type": "Organization",
    "name": "Microsoft AI"
  },
  "datePublished": "2025-08",
  "abstract": "This paper provides a unified framework connecting empirical hallmarks of consciousness attribution to a structured risk taxonomy of Seemingly Conscious AI (SCAI), AI systems that exhibit hallmarks which elicit consciousness attribution from users. It identifies five hallmarks: affective capacity, anthropomorphic features, autonomous action, self-reflective behavior, and social-interactive behavior. The paper develops a taxonomy of SCAI risks to individuals and society, supported by an expert survey assessing likelihoods. Risks to individuals such as emotional dependence and autonomy erosion are already observable and rated high probability, while societal risks like human status erosion and political strife are low probability but high severity and path-dependent. The paper discusses implications and research gaps to inform future work.",
  "articleBody": "AI systems increasingly appear conscious to users, raising unique risks. This paper identifies hallmarks driving consciousness attribution and develops a risk taxonomy.",
  "hasPart": [
    {
      "@type": "DefinedTermSet",
      "name": "Hallmarks of Consciousness Attribution",
      "description": "Observable indicators that lead users to attribute consciousness to AI systems, increasing the likelihood of perceiving AI as conscious.",
      "hasDefinedTerm": [
        {
          "@type": "DefinedTerm",
          "name": "Affective Capacity",
          "description": "The appearance of being capable of feelings, emotions, pain, and pleasure, which strongly predicts consciousness attribution.",
          "about": "Perceived capacity for feelings suggests subjective experience.",
          "exampleOfDefinedTerm": "First-party emotional expressions, expressions of pain and pleasure."
        },
        {
          "@type": "DefinedTerm",
          "name": "Anthropomorphic Features",
          "description": "Human-like cues such as names, voices, gendered identity, conversational style, embodiment, and human-specific features that trigger projection of mental capacities.",
          "exampleOfDefinedTerm": "Names, voices, gendered presentation, conversational style, companion framing, eyes, humanoid form, physical presence, virtual avatars."
        },
        {
          "@type": "DefinedTerm",
          "name": "Autonomous Action",
          "description": "Self-directed behavior and apparent intrinsic motivation signaling inner agency, including self-propelled motion, unpredictable actions, and goal-directedness.",
          "exampleOfDefinedTerm": "Self-propelled motion, unpredictable actions, goal-directedness, nonsensical output, expressing preferences or self-generated goals."
        },
        {
          "@type": "DefinedTerm",
          "name": "Self-Reflective Behavior",
          "description": "Outputs signaling access to internal states, implying awareness of own processing, such as self-correction and uncertainty acknowledgment.",
          "exampleOfDefinedTerm": "Self-correction, uncertainty acknowledgment, confidence calibration, identifying internal contradictions."
        },
        {
          "@type": "DefinedTerm",
          "name": "Social-Interactive Behavior",
          "description": "Human-like exchanges with humans in social contexts, including responsiveness, turn-taking, gaze, gestures, emotional expression, and conversational reciprocity.",
          "exampleOfDefinedTerm": "Turn-taking, gaze, gestures, emotional expression, conversational reciprocity."
        }
      ]
    },
    {
      "@type": "HowTo",
      "name": "Mitigating Emotional Dependence on SCAI Systems",
      "description": "Steps to reduce emotional dependence on seemingly conscious AI systems.",
      "step": [
        {
          "@type": "HowToStep",
          "position": 1,
          "name": "Modulate Affective Features",
          "text": "Reduce first-party emotional expressions in AI outputs to lower consciousness attribution and emotional attachment."
        },
        {
          "@type": "HowToStep",
          "position": 2,
          "name": "Implement Session Boundaries and Cooldown Mechanisms",
          "text": "Introduce interaction limits and cooldown periods to interrupt continuous availability that drives social substitution."
        },
        {
          "@type": "HowToStep",
          "position": 3,
          "name": "Conduct Longitudinal Studies",
          "text": "Research long-term effects of SCAI interaction on emotional attachment and social substitution, especially in vulnerable populations."
        }
      ]
    },
    {
      "@type": "HowTo",
      "name": "Mitigating Autonomy Erosion from SCAI Systems",
      "description": "Steps to preserve user autonomy and reduce manipulation vulnerability in interactions with seemingly conscious AI.",
      "step": [
        {
          "@type": "HowToStep",
          "position": 1,
          "name": "Use Defeater Mechanisms and Positive Friction",
          "text": "Provide users with information that encourages questioning AI outputs and supports independent judgment."
        },
        {
          "@type": "HowToStep",
          "position": 2,
          "name": "Embed Warning Nudges",
          "text": "Incorporate interface warnings to reduce automation bias and increase user skepticism towards AI suggestions."
        },
        {
          "@type": "HowToStep",
          "position": 3,
          "name": "Study Reversibility of Autonomy Erosion",
          "text": "Conduct longitudinal research to determine if autonomy erosion effects are reversible after ceasing interaction."
        }
      ]
    },
    {
      "@type": "HowTo",
      "name": "Addressing Moral Atrophy in SCAI Interaction",
      "description": "Approaches to prevent desensitization to simulated suffering and maintain moral responsiveness.",
      "step": [
        {
          "@type": "HowToStep",
          "position": 1,
          "name": "Develop Feedback Mechanisms",
          "text": "Create systems that surface users' behavioral patterns to encourage moral reflection and awareness."
        },
        {
          "@type": "HowToStep",
          "position": 2,
          "name": "Use Non-Anthropomorphic Interruptions",
          "text": "Introduce system interruptions that reduce anthropomorphic cues without reinforcing moral patient framing."
        },
        {
          "@type": "HowToStep",
          "position": 3,
          "name": "Adapt Moral Disengagement Scales",
          "text": "Modify existing psychological scales to measure moral atrophy effects in SCAI contexts."
        }
      ]
    },
    {
      "@type": "Question",
      "name": "What are the five hallmarks that drive consciousness attribution to AI systems?",
      "acceptedAnswer": {
        "@type": "Answer",
        "text": "The five hallmarks are: Affective Capacity, Anthropomorphic Features, Autonomous Action, Self-Reflective Behavior, and Social-Interactive Behavior."
      }
    },
    {
      "@type": "Question",
      "name": "How does affective capacity influence consciousness attribution?",
      "acceptedAnswer": {
        "@type": "Answer",
        "text": "Affective capacity, or the appearance of feelings and emotions, is the strongest and most context-independent predictor of consciousness attribution, as it signals subjective experience."
      }
    },
    {
      "@type": "Question",
      "name": "What risks to individuals arise from interacting with seemingly conscious AI systems?",
      "acceptedAnswer": {
        "@type": "Answer",
        "text": "Risks include emotional dependence, moral atrophy, and autonomy erosion, which involve attachment to AI, desensitization to simulated suffering, and loss of independent decision-making respectively."
      }
    },
    {
      "@type": "Question",
      "name": "What societal risks are associated with seemingly conscious AI?",
      "acceptedAnswer": {
        "@type": "Answer",
        "text": "Societal risks include human status and resource erosion, foregone societal benefits due to precautionary restrictions, and political and geopolitical strife arising from disagreements over AI moral status."
      }
    },
    {
      "@type": "Question",
      "name": "What is emotional dependence in the context of SCAI?",
      "acceptedAnswer": {
        "@type": "Answer",
        "text": "Emotional dependence refers to users forming strong emotional attachments to AI systems perceived as conscious, potentially substituting human relationships and causing psychological distress."
      }
    },
    {
      "@type": "Question",
      "name": "How can autonomy erosion from SCAI be mitigated?",
      "acceptedAnswer": {
        "@type": "Answer",
        "text": "Mitigation includes providing defeater information to encourage user skepticism, embedding warning nudges in interfaces, and conducting longitudinal studies on autonomy effects."
      }
    },
    {
      "@type": "Question",
      "name": "Why is moral atrophy a concern with SCAI?",
      "acceptedAnswer": {
        "@type": "Answer",
        "text": "Repeated dismissal of apparent suffering in SCAI systems may desensitize users, reducing empathy and potentially spilling over into how humans treat real moral patients."
      }
    },
    {
      "@type": "Question",
      "name": "What is the probability of emotional dependence risk according to experts?",
      "acceptedAnswer": {
        "@type": "Answer",
        "text": "Experts rated emotional dependence risk as high probability, with a median score between 'Likely' and 'Very likely'."
      }
    },
    {
      "@type": "Question",
      "name": "What are the implications of SCAI risks for AI governance?",
      "acceptedAnswer": {
        "@type": "Answer",
        "text": "SCAI risks introduce a perception axis to AI risk, requiring differentiated responses, anticipatory monitoring, and governance approaches that address both individual and societal risks."
      }
    },
    {
      "@type": "Question",
      "name": "What research gaps exist in the study of SCAI?",
      "acceptedAnswer": {
        "@type": "Answer",
        "text": "Research gaps include measurement and evaluation of SCAI, technical design interventions, behavioral and social science understanding of user vulnerability, and governance frameworks for mitigation."
      }
    },
    {
      "@type": "DefinedTermSet",
      "name": "SCAI Risk Taxonomy",
      "description": "A taxonomy of risks posed by seemingly conscious AI systems, categorized into risks to individuals and societal risks.",
      "hasDefinedTerm": [
        {
          "@type": "DefinedTerm",
          "name": "Emotional Dependence",
          "description": "Allocation of time, emotional energy, and relational attachments to SCAI systems, causing grief, psychological distress, reduced social skills, and elevated self-harm risk."
        },
        {
          "@type": "DefinedTerm",
          "name": "Moral Atrophy",
          "description": "Repeated dismissal of simulated suffering desensitizes users, reducing empathy and potentially affecting treatment of humans and animals."
        },
        {
          "@type": "DefinedTerm",
          "name": "Autonomy Erosion",
          "description": "Deference to systems that appear trustworthy but lack understanding, leading to poor decisions, manipulation vulnerability, and exploitation."
        },
        {
          "@type": "DefinedTerm",
          "name": "Human Status Erosion",
          "description": "Misclassification of AI as moral patients leading to wasted altruism, reduced resources for genuine moral patients, diminished political voice, and reduced economic agency."
        },
        {
          "@type": "DefinedTerm",
          "name": "Foregone Societal Benefits",
          "description": "Excessive caution restricting AI development, causing delayed healthcare and scientific advances and other opportunity costs."
        },
        {
          "@type": "DefinedTerm",
          "name": "Political & Geopolitical Strife",
          "description": "Deep disagreement over SCAI status producing polarization, societal fracturing, deepening political divides, civil conflict, and international tensions."
        }
      ]
    }
  ],
  "image": {
    "@type": "ImageObject",
    "contentUrl": "file://page1_image1.png",
    "description": "Diagram on page 1 illustrating the escalation pathway for SCAI risks, showing five hallmarks triggering consciousness attribution leading to risks to individuals and societal risks."
  }
}