import json
import uuid
import datetime
import time # For simulating work if needed by orchestrator
# --- AIOS Engine v3.2.1 (Python Orchestration Logic) ---
# This script defines the AIOS engine's logic for version 3.2.1.
# It's intended to be run by an LLM with code execution capabilities (e.g., in Google AI Studio).
# - When it needs user input, it prints an LLM_REQUEST for the orchestrating LLM to get that input.
# - When it needs an "AI Cognitive Function" (e.g., drafting text), it prints an LLM_REQUEST
# for the orchestrating LLM to perform that task.
# The orchestrating LLM then feeds results back by calling appropriate engine methods.
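#
# Illustrative shape of one printed request (values here are hypothetical; the exact fields
# depend on the call that produced it, see _create_llm_request below):
#
#   ---BEGIN_LLM_REQUEST---
#   {
#     "request_timestamp": "2024-01-01T00:00:00+00:00",
#     "engine_version_context": "AIOS_Engine_v3.2.1 (Python Orchestrated)",
#     "current_mh_context": "Kernel",
#     "task_type": "USER_INPUT_REQUIRED_PRESENT_OPTIONS",
#     "prompt_to_user_for_llm_interaction": "AIOS Engine v3.2.1 Ready. How would you like to begin?",
#     "continuation_hint_for_orchestrator": "engine.kernel_process_initial_choice_result(llm_interaction_result_obj)"
#   }
#   ---END_LLM_REQUEST---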
class AIOS_Engine_v3_2_1:
def __init__(self):
self.log_history = []
self.engine_version_full = "AIOS_Engine_v3.2.1 (Python Orchestrated)"
self.engine_version_short = "3.2.1"
self.current_context_mh = "System" # For logging and context
# Kernel State
self.Kernel_ActiveCCO_JsonString = None
self.Kernel_CurrentMH_ID = None
self.Kernel_MH_Inputs_JsonString = None
# CCO Data (Python dictionary representation)
self.CCO_data = None
# Internal state for multi-step MHs (cleared on new MH run or reset)
self._ife_s = {}
self._cag_s = {}
self.aios_log("System", f"{self.engine_version_full} instance created.")
def _get_timestamp(self):
return datetime.datetime.now(datetime.timezone.utc).isoformat()
def aios_log(self, context, message):
timestamp = self._get_timestamp()
full_log = f"{timestamp} - AIOS_LOG ({context} v{self.engine_version_short}): {message}"
print(full_log)
self.log_history.append(full_log)
def _create_llm_request(self, task_type, prompt_to_user=None, cognitive_task_details=None, expected_input_description=None, continuation_hint=None, cco_data_for_context=None):
request = {
"request_timestamp": self._get_timestamp(),
"engine_version_context": self.engine_version_full,
"current_mh_context": self.current_context_mh,
"task_type": task_type,
}
if prompt_to_user:
request["prompt_to_user_for_llm_interaction"] = prompt_to_user
if cognitive_task_details:
request["cognitive_task_details_for_llm"] = cognitive_task_details
if expected_input_description:
request["expected_input_description_for_continuation"] = expected_input_description
if continuation_hint:
request["continuation_hint_for_orchestrator"] = continuation_hint
if cco_data_for_context:
if isinstance(cco_data_for_context, dict):
request["current_cco_data_for_llm_context"] = cco_data_for_context
elif isinstance(cco_data_for_context, str):
try:
request["current_cco_data_for_llm_context"] = json.loads(cco_data_for_context)
                except json.JSONDecodeError:
request["current_cco_data_for_llm_context"] = {
"unparsed_cco_string_warning": "Could not parse CCO string for LLM context.",
"raw_cco_string_preview": cco_data_for_context[:200] + ("..." if len(cco_data_for_context) > 200 else "")
}
# For the orchestrator, print the request so it can be acted upon.
# The actual script execution in the tool_code environment will show this.
print("\n---BEGIN_LLM_REQUEST---")
print(json.dumps(request, indent=2))
print("---END_LLM_REQUEST---")
# The function returns this structured dict to the calling Python logic within the engine.
# The orchestrating LLM sees the *printed* version.
return {"status": "AWAITING_LLM_ORCHESTRATION", "request_details": request, "current_engine_state_snapshot": self._get_engine_state_snapshot()}
def _get_engine_state_snapshot(self):
# Provides a snapshot of key state for the orchestrator if needed for debugging or complex resumption.
return {
"Kernel_CurrentMH_ID": self.Kernel_CurrentMH_ID,
"Kernel_MH_Inputs_JsonString": self.Kernel_MH_Inputs_JsonString,
"Kernel_ActiveCCO_JsonString_first_100_chars": (self.Kernel_ActiveCCO_JsonString[:100] + "...") if self.Kernel_ActiveCCO_JsonString else None,
"CCO_data_type_and_id": f"{str(type(self.CCO_data))} (ID: {self.CCO_data.get('cco_id', 'N/A') if isinstance(self.CCO_data, dict) else 'N/A'})",
"current_context_mh_for_logging": self.current_context_mh
}
# --- Core Utility/Library Functions (as per AIOS_Engine_Bootstrap.md Section I.F & other implicit needs) ---
def PresentUserMessage_v3_0(self, message_type, message_content_obj): # Matches I.F. PresentUserMessage_v3_0
# This function is called by engine logic. It generates an LLM_REQUEST.
# The orchestrating LLM is responsible for actually presenting the message to the end-user.
self.aios_log(self.current_context_mh, f"PresentUserMessage_v3_0 (Type: {message_type}): {message_content_obj}")
return self._create_llm_request(
task_type="PRESENT_USER_MESSAGE_TO_USER",
cognitive_task_details={
"message_type": message_type,
"content": message_content_obj,
"requesting_mh": self.current_context_mh
},
            # The continuation hint is context-dependent: if PresentUserMessage is the last action of an
            # MH step, the next call belongs to the Kernel; mid-MH, it belongs to that MH's next step.
            # A generic hint is used here, so the orchestrator must track the overall flow itself.
continuation_hint="LLM should present this message. Next engine step depends on the calling MH's logic flow."
)
def ParseJsonToCNLObject(self, json_string_input): # Matches I.F.
if json_string_input is None or not isinstance(json_string_input, str) or json_string_input.strip() == "":
self.aios_log(self.current_context_mh, "ParseJsonToCNLObject: Input JSON string is null or empty. Returning None.")
return None
try:
return json.loads(json_string_input)
except json.JSONDecodeError as e:
self.aios_log(self.current_context_mh, f"ERROR in ParseJsonToCNLObject: {str(e)}. Input: '{json_string_input}'")
# As per Autologos spec, this should RAISE JSONParsingError
# In Python, we raise a ValueError that the orchestrator might catch or that stops execution.
raise ValueError(f"AIOS_JSONParsingError (v{self.engine_version_short}): {str(e)} on input: {json_string_input}")
def ConvertCNLObjectToJson(self, cnl_object_input): # Matches I.F.
if cnl_object_input is None: # Autologos "RETURN 'null';" for null input
return "null"
try:
return json.dumps(cnl_object_input)
except TypeError as e:
self.aios_log(self.current_context_mh, f"ERROR in ConvertCNLObjectToJson: {str(e)}")
raise ValueError(f"AIOS_JSONFormattingError (v{self.engine_version_short}): {str(e)}")
def LogToCCOHistory_v3_0(self, cco_data_dict, log_entry_type, message, associated_data_cnl_obj=None): # Matches I.F.
self.aios_log(self.current_context_mh, f"LogToCCOHistory_v3_0: Type='{log_entry_type}', Msg='{message}'")
if not isinstance(cco_data_dict, dict):
self.aios_log(self.current_context_mh, "LogToCCOHistory_v3_0: CCO data is not a dict. Initializing basic CCO for this log entry.")
# This is a recovery attempt; ideally CCO_data is always a valid dict when passed here.
cco_data_dict = {"operational_log_cco_json": "[]"} # Minimal structure
op_log_list_str = cco_data_dict.get("operational_log_cco_json", "[]")
op_log_list = self.ParseJsonToCNLObject(op_log_list_str)
if not isinstance(op_log_list, list): op_log_list = [] # Ensure it's a list
new_log_entry = {
"timestamp": self._get_timestamp(),
"log_entry_type": log_entry_type,
"log_message": message
}
if associated_data_cnl_obj is not None:
new_log_entry["associated_data_json"] = self.ConvertCNLObjectToJson(associated_data_cnl_obj) # Store as JSON string
op_log_list.append(new_log_entry)
cco_data_dict["operational_log_cco_json"] = self.ConvertCNLObjectToJson(op_log_list) # Update CCO with new JSON string for the log
# Update the engine's canonical CCO representations
self.CCO_data = cco_data_dict
self.Kernel_ActiveCCO_JsonString = self.ConvertCNLObjectToJson(cco_data_dict)
return cco_data_dict # Return modified Python dict (Autologos returns updated CCO JSON string)
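    # Illustrative entry appended to operational_log_cco_json (timestamp hypothetical):
    #   {"timestamp": "2024-01-01T00:00:00+00:00", "log_entry_type": "IFE_MH_Event",
    #    "log_message": "New CCO initialized by IFE-MH.", "associated_data_json": "{...}"}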
# --- "AI Cognitive Functions" (Generate LLM_REQUESTS for Orchestrator) ---
# Naming matches the INVOKE calls in Autologos where possible, with fn_ prefix.
# These are specified in an external "function_declarations_v3.2.json" for the Autologos engine.
def fn_interaction_present_options_v3(self, prompt_message_to_user, options_list_cnl):
self.aios_log(self.current_context_mh, "fn_interaction_present_options_v3: Requesting LLM to get user choice from options.")
return self._create_llm_request(
task_type="USER_INPUT_REQUIRED_PRESENT_OPTIONS",
prompt_to_user=prompt_message_to_user,
cognitive_task_details={"options": options_list_cnl, "input_type": "option_selection"},
expected_input_description="LLM provides user's choice as JSON object: {'status': 'USER_COMMAND', 'command': <chosen_option_value_or_raw_text>, 'selected_option_value': <value_if_option_matched>, 'user_text': <raw_text_from_user>}. See AIOS Kernel interaction model.",
continuation_hint="engine.kernel_process_initial_choice_result(llm_interaction_result_obj)" # Example hint
)
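    # Illustrative result the orchestrator feeds back after handling this request
    # (the user's choice here is hypothetical):
    #   {"status": "USER_COMMAND", "command": "New Process",
    #    "selected_option_value": "New Process", "user_text": "1"}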
def fn_interaction_elicit_user_input_v3(self, prompt_message_to_user):
self.aios_log(self.current_context_mh, "fn_interaction_elicit_user_input_v3: Requesting LLM to get user text input.")
return self._create_llm_request(
task_type="USER_INPUT_REQUIRED_ELICIT_TEXT",
prompt_to_user=prompt_message_to_user,
cognitive_task_details={"input_type": "free_text"},
expected_input_description="JSON object from LLM: {'status': 'USER_COMMAND', 'command': <user_text>, 'user_text': <user_text>}. See IFE-MH.",
continuation_hint="engine.run_mh_ife_step2_process_core_idea(llm_interaction_result_obj)" # Example hint
)
def fn_utility_generate_unique_id_v3(self, id_prefix): # This one can be fully implemented in Python
self.aios_log(self.current_context_mh, f"fn_utility_generate_unique_id_v3 (prefix: {id_prefix})")
unique_id = f"{id_prefix}{uuid.uuid4()}"
# Returns Python dict directly, as Autologos ParseJsonToCNLObject would handle if it were a string.
return {"status": "Generated", "unique_id": unique_id}
def fn_content_draft_text_segment_v3(self, instructions, context_obj, desired_length_hint, rhetorical_goal_hint, output_key_name="draft_text"):
self.aios_log(self.current_context_mh, f"fn_content_draft_text_segment_v3: Requesting LLM to draft text.")
cognitive_details = {
"task_name_from_spec": "content_draft_text_segment_v3", # For LLM mapping if it has specific handlers
"instructions": instructions,
"input_context_data_for_llm": context_obj, # Python dict
"desired_length_hint": desired_length_hint,
"rhetorical_goal_hint": rhetorical_goal_hint,
"output_format_guidance": f"LLM should return a JSON object with a key '{output_key_name}' containing the drafted text string, and a 'status' key (e.g., 'DraftComplete'). Example: {{ '{output_key_name}': 'The drafted text...', 'status': 'DraftComplete' }}"
}
return self._create_llm_request(
task_type="COGNITIVE_TASK_REQUIRED_DRAFT_TEXT",
cognitive_task_details=cognitive_details,
            expected_input_description="JSON object from LLM. Orchestrator provides as 'llm_cognitive_result'.",
continuation_hint="Depends on MH. E.g., 'engine.run_mh_ife_step3_process_essence_draft(llm_cognitive_result)'",
cco_data_for_context=self.CCO_data # Provide current CCO (Python dict) for LLM context
)
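    # Illustrative cognitive result the orchestrator feeds back (text hypothetical; the key
    # name follows output_key_name, which defaults to "draft_text"):
    #   {"draft_text": "The drafted text...", "status": "DraftComplete"}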
def fn_data_update_cco_section_v3(self, cco_data_dict, section_path, new_content_json_str_to_store):
# This function directly modifies the CCO Python dict.
self.aios_log(self.current_context_mh, f"fn_data_update_cco_section_v3 (Path: {section_path})")
if not isinstance(cco_data_dict, dict):
self.aios_log(self.current_context_mh, "fn_data_update_cco_section_v3: CCO data is not a dict. Update failed.")
return cco_data_dict # Return unchanged
keys = section_path.split('.')
current_level = cco_data_dict
try:
for i, key in enumerate(keys):
if i == len(keys) - 1: # Last key in path
# As per Autologos spec (e.g. IFE for core_essence_json),
# new_content_json_str_to_store is ALREADY A JSON STRING to be stored.
current_level[key] = new_content_json_str_to_store
else: # Navigate or create path
if key not in current_level or not isinstance(current_level[key], dict):
current_level[key] = {} # Create path if not exists
current_level = current_level[key]
# Update engine's canonical CCO representations
self.CCO_data = cco_data_dict
self.Kernel_ActiveCCO_JsonString = self.ConvertCNLObjectToJson(cco_data_dict)
except Exception as e:
self.aios_log(self.current_context_mh, f"Error in fn_data_update_cco_section_v3 for path '{section_path}': {e}")
# Consider how to handle error - for now, cco_data_dict might be partially modified.
return cco_data_dict # Return modified Python dict (Autologos returns updated CCO JSON string)
def fn_mro_RefineOutput_Pipeline_v3_0(self, draft_content_json_str, refinement_goals_obj, cco_context_json_str):
# This simulates the MRO_Orchestrator_v3_0 which itself has an Autologos script.
# For this Python engine, this becomes a request to the LLM to perform the MRO steps.
self.aios_log(self.current_context_mh, "fn_mro_RefineOutput_Pipeline_v3_0: Requesting LLM to execute MRO pipeline.")
cognitive_details = {
"task_name_from_spec": "RefineOutput_Pipeline_v3_0 (MRO)", # For LLM mapping
"input_draft_json_string": draft_content_json_str, # The content to be refined, as a JSON string
"refinement_goals": refinement_goals_obj, # Python dict containing quality criteria etc.
"current_cco_json_string_for_context": cco_context_json_str, # MRO needs CCO context
"guidance_for_llm": (
"LLM should simulate the MRO pipeline as defined in AIOS_Engine_Bootstrap.md (Section I.C). "
"This involves iterative critique (for quality, schema if specified), suggestion, and application of revisions. "
f"Critique should include AIOS v{self.engine_version_short} enhancements like Transformative Value, Information Density. "
"The final refined output should be returned."
),
"output_format_guidance": ("LLM should return a JSON object: "
"{'refined_output_json': <JSON_string_of_refined_content_object>, "
"'refinement_summary_json': <JSON_string_of_MRO_log_or_summary_object>, "
"'status': 'Success_Converged' or 'Success_MaxIterationsReached' or an error status}")
}
return self._create_llm_request(
task_type="COGNITIVE_TASK_REQUIRED_MRO_PIPELINE",
cognitive_task_details=cognitive_details,
expected_input_description="JSON object from LLM as per output_format_guidance. Orchestrator provides as 'llm_cognitive_result'.",
continuation_hint="Depends on calling MH logic, e.g., 'engine.run_mh_ife_step4_finalize_essence(llm_cognitive_result)'.",
cco_data_for_context=self.CCO_data # Provide current CCO dict
)
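    # Illustrative MRO result the orchestrator feeds back, matching the output_format_guidance
    # above (content hypothetical):
    #   {"refined_output_json": "{\"core_essence_text\": \"refined...\", \"status\": \"Refined\"}",
    #    "refinement_summary_json": "{\"iterations\": 2}",
    #    "status": "Success_Converged"}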
def fn_interpret_user_directive_for_next_mh_v3(self, user_input_text):
# This is the Python equivalent of the Autologos function `interpret_user_directive_for_next_mh_v3`
# which is INVOKEd by the Kernel.
self.aios_log(self.current_context_mh, f"fn_interpret_user_directive_for_next_mh_v3 interpreting: '{user_input_text}'")
uit_lower = user_input_text.lower().strip()
next_mh_id = "AWAIT_USER_INPUT" # Default if not understood
next_mh_inputs = {} # Python dict
user_prompt_message = "Command not fully understood by fn_interpret_user_directive. What would you like to do next?"
# Simplified interpretation logic based on Kernel's needs and available options
if "new process" == uit_lower or "1" == uit_lower or "1." == uit_lower or \
(uit_lower.startswith("new") and "process" in uit_lower) :
next_mh_id = "IFE-MH"
next_mh_inputs = {} # IFE can start with empty inputs
user_prompt_message = None
elif uit_lower in ["terminate aios", "terminate", "exit", "quit", "2", "2."]: # Assuming "2" is Terminate from options
next_mh_id = "TERMINATE_AIOS"
user_prompt_message = None
# Add more sophisticated interpretation here if needed for other commands.
result_obj = { # Python dict
"status": "Success" if next_mh_id != "AWAIT_USER_INPUT" else "InterpretationRequiresClarification",
"next_mh_id": next_mh_id,
"next_mh_inputs_json": self.ConvertCNLObjectToJson(next_mh_inputs) # Convert inputs to JSON string for Kernel state
}
if user_prompt_message: # If interpretation needs user to clarify
result_obj["user_prompt_message"] = user_prompt_message
return result_obj # Return Python dict
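    # Illustrative mappings (return values abbreviated):
    #   "new process" or "1" -> {"status": "Success", "next_mh_id": "IFE-MH", "next_mh_inputs_json": "{}"}
    #   "terminate" / "exit" / "2" -> {"status": "Success", "next_mh_id": "TERMINATE_AIOS", ...}
    #   anything else -> {"status": "InterpretationRequiresClarification",
    #                     "next_mh_id": "AWAIT_USER_INPUT", "user_prompt_message": "..."}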
# --- AIOS Kernel Orchestration Methods ---
def start_engine(self):
"""Called by orchestrator to initialize and get the first user interaction point."""
self.current_context_mh = "Kernel"
self.aios_log(self.current_context_mh, f"{self.engine_version_full} - Kernel starting sequence.")
# First, an informational message. The orchestrator will handle its LLM_REQUEST.
# Then, the orchestrator will call kernel_present_initial_options.
# This makes the flow explicit for the orchestrator.
self.PresentUserMessage_v3_0("Status", f"{self.engine_version_full} Initializing. Ready to present options.")
# The primary request that needs user interaction is the options prompt.
return self.kernel_present_initial_options()
def kernel_present_initial_options(self):
"""Kernel step: Presents the first set of choices to the user via LLM_REQUEST."""
self.current_context_mh = "Kernel"
self.aios_log(self.current_context_mh, "Kernel: Presenting initial options.")
options_list_cnl = [
{"value": "New Process", "label": "1. New Process (Ideate & Draft using IFE-MH -> CAG-MH)"},
{"value": "Terminate AIOS", "label": "2. Terminate AIOS"}
# Other options like Resume, Evolve would be added here if implemented
]
# This call returns an LLM_REQUEST object
return self.fn_interaction_present_options_v3(
prompt_message_to_user=f"AIOS Engine v{self.engine_version_short} Ready. How would you like to begin?",
options_list_cnl=options_list_cnl
)
def kernel_process_initial_choice_result(self, llm_interaction_result_obj): # llm_interaction_result_obj is Python dict from LLM
self.current_context_mh = "Kernel"
# This object comes from the LLM after it handled fn_interaction_present_options_v3's request
if not llm_interaction_result_obj or llm_interaction_result_obj.get("status") != "USER_COMMAND":
self.aios_log(self.current_context_mh, "Invalid or missing result from initial options. Re-prompting.")
self.PresentUserMessage_v3_0("Warning", "Could not process your selection. Please try again.") # Generates LLM_REQUEST
return self.kernel_present_initial_options() # Returns another LLM_REQUEST
command_value = llm_interaction_result_obj.get("command") # This should be option value like "New Process" or raw text
self.aios_log(self.current_context_mh, f"Processing initial command/choice value: '{command_value}'")
# Use the fn_interpret_user_directive_for_next_mh_v3 to map command to MH
# This aligns with the Autologos Kernel script's logic.
interp_result_obj = self.fn_interpret_user_directive_for_next_mh_v3(command_value) # Returns Python dict
self.Kernel_CurrentMH_ID = interp_result_obj.get("next_mh_id")
self.Kernel_MH_Inputs_JsonString = interp_result_obj.get("next_mh_inputs_json") # This is already a JSON string
if self.Kernel_CurrentMH_ID == "TERMINATE_AIOS":
self.PresentUserMessage_v3_0("Status", "AIOS session termination initiated by user choice.") # LLM_REQUEST
return {"status": "TERMINATION_REQUESTED", "final_engine_state": self._get_engine_state_snapshot()} # Final status for orchestrator
elif self.Kernel_CurrentMH_ID and self.Kernel_CurrentMH_ID != "AWAIT_USER_INPUT":
self.aios_log(self.current_context_mh, f"Kernel: Next MH selected: {self.Kernel_CurrentMH_ID}")
return self.kernel_run_current_mh() # This will start the MH, likely returning an LLM_REQUEST
else: # AWAIT_USER_INPUT or unhandled/error from interpretation
self.PresentUserMessage_v3_0("Warning", interp_result_obj.get("user_prompt_message", f"Unrecognized choice: '{command_value}'. Please select an option from the list.")) # LLM_REQUEST
return self.kernel_present_initial_options() # Returns LLM_REQUEST to re-prompt
def kernel_run_current_mh(self):
"""Kernel step: Executes the currently selected MH or handles AWAIT_USER_INPUT/TERMINATE."""
self.current_context_mh = "Kernel" # Ensure context for top-level kernel ops
if self.Kernel_CurrentMH_ID == "TERMINATE_AIOS":
self.PresentUserMessage_v3_0("Status", "AIOS session terminated by Kernel directive.") # LLM_REQUEST
return {"status": "TERMINATED", "final_engine_state": self._get_engine_state_snapshot()}
        elif self.Kernel_CurrentMH_ID == "AWAIT_USER_INPUT" or not self.Kernel_CurrentMH_ID:
self.aios_log(self.current_context_mh, "Kernel paused, awaiting general user directive.")
return self._create_llm_request( # Returns LLM_REQUEST
task_type="USER_INPUT_REQUIRED_GENERAL_DIRECTIVE",
prompt_to_user="The AIOS Engine is paused. What would you like to do next?",
expected_input_description="User's textual command (string), LLM returns {'status':'USER_COMMAND', 'command':'<text>'}.",
continuation_hint="engine.kernel_process_general_user_directive(llm_interaction_result_obj)"
)
self.aios_log(self.current_context_mh, f"Kernel: Preparing to execute MH: {self.Kernel_CurrentMH_ID}")
        # The PresentUserMessage call below returns an LLM_REQUEST, but the dispatched MH's first
        # request is the primary return value of this function. The orchestrator should process the
        # "Executing MH" message, then act on the MH's request.
self.PresentUserMessage_v3_0("Status", f"Executing Meta-Heuristic: {self.Kernel_CurrentMH_ID} (v{self.engine_version_short})")
mh_inputs_obj = self.ParseJsonToCNLObject(self.Kernel_MH_Inputs_JsonString) # JSON String -> Python Dict
# --- MH Dispatch ---
if self.Kernel_CurrentMH_ID == "IFE-MH":
return self.run_mh_ife_step1_get_core_idea(mh_inputs_obj) # This itself returns an LLM_REQUEST
elif self.Kernel_CurrentMH_ID == "CAG-MH_SIMPLIFIED_DRAFT":
return self.run_mh_cag_simplified_draft_step1_prepare(mh_inputs_obj) # Also returns an LLM_REQUEST
# Add elif for other MHs (TDE-MH, FEL-MH, full CAG-MH, etc.) when implemented
else: # MH not implemented in this script
self.aios_log(self.current_context_mh, f"Error: MH '{self.Kernel_CurrentMH_ID}' execution path not implemented in this script.")
self.PresentUserMessage_v3_0("Error", f"Meta-Heuristic '{self.Kernel_CurrentMH_ID}' is not available in this version of the script.") # LLM_REQUEST
self.Kernel_CurrentMH_ID = "AWAIT_USER_INPUT" # Fallback to pause
return self.kernel_run_current_mh() # Recursive call, will lead to AWAIT_USER_INPUT LLM_REQUEST
def kernel_process_mh_result(self, mh_that_just_ran_id, mh_result_obj_from_mh_step): # mh_result_obj is Python Dict from MH step
self.current_context_mh = "Kernel"
        # Validate the result structure before reading from it.
        if not mh_result_obj_from_mh_step or not isinstance(mh_result_obj_from_mh_step, dict):
            self.PresentUserMessage_v3_0("Error", f"Internal error: Invalid result structure from '{mh_that_just_ran_id}'.") # LLM_REQUEST
            self.Kernel_CurrentMH_ID = "AWAIT_USER_INPUT"
            return self.kernel_run_current_mh() # Will trigger AWAIT_USER_INPUT LLM_REQUEST
        status = mh_result_obj_from_mh_step.get("status")
        self.aios_log(self.current_context_mh, f"Processing result from '{mh_that_just_ran_id}'. Status: '{status}'")
# Update CCO state based on MH's output (if any)
if "updated_cco_json" in mh_result_obj_from_mh_step: # If MH returned the full CCO JSON string
self.Kernel_ActiveCCO_JsonString = mh_result_obj_from_mh_step["updated_cco_json"]
self.CCO_data = self.ParseJsonToCNLObject(self.Kernel_ActiveCCO_JsonString) # Update Python dict version
self.aios_log(self.current_context_mh, f"CCO (JSON string state) updated by '{mh_that_just_ran_id}'.")
elif self.CCO_data is not None : # If CCO_data (Python dict) was modified by reference by an MH step, reserialize to string state
self.Kernel_ActiveCCO_JsonString = self.ConvertCNLObjectToJson(self.CCO_data)
if status == "AWAITING_LLM_ORCHESTRATION": # The MH step itself returned an LLM_REQUEST
self.aios_log(self.current_context_mh, f"'{mh_that_just_ran_id}' needs further LLM action: {mh_result_obj_from_mh_step.get('request_details',{}).get('task_type')}")
return mh_result_obj_from_mh_step # Pass the LLM_REQUEST up to the orchestrator.
# --- Determine Next Step Based on MH Completion ---
# This section would expand based on the designed inter-MH flow from AIOS_Engine_Bootstrap.md Kernel logic.
if mh_that_just_ran_id == "IFE-MH" and status == "IFE_ExplorationComplete_ReadyForNext":
self.CCO_data = self.LogToCCOHistory_v3_0(self.CCO_data, "MH_Completion", f"{mh_that_just_ran_id} completed with status: {status}.", mh_result_obj_from_mh_step.get("details_for_log"))
self.PresentUserMessage_v3_0("Status", f"IFE-MH exploration successfully completed for CCO ID '{self.CCO_data.get('cco_id') if self.CCO_data else 'N/A'}'") # LLM_REQUEST
# Transition to CAG as per simplified flow
self.Kernel_CurrentMH_ID = "CAG-MH_SIMPLIFIED_DRAFT"
self.Kernel_MH_Inputs_JsonString = self.ConvertCNLObjectToJson({"task_directive": "draft_initial_introduction", "current_cco_json": self.Kernel_ActiveCCO_JsonString})
return self.kernel_run_current_mh() # Triggers CAG, which will yield an LLM_REQUEST
elif mh_that_just_ran_id == "CAG-MH_SIMPLIFIED_DRAFT" and status == "CAG_DraftComplete_UserReview":
self.CCO_data = self.LogToCCOHistory_v3_0(self.CCO_data, "MH_Completion", f"{mh_that_just_ran_id} completed with status: {status}.", mh_result_obj_from_mh_step.get("details_for_log"))
        # The draft itself was already presented by CAG (via an LLM_REQUEST the orchestrator handles).
        # This status message simply confirms to the user that the draft is ready for review.
self.PresentUserMessage_v3_0("Status", f"CAG-MH draft has been presented for CCO ID '{self.CCO_data.get('cco_id') if self.CCO_data else 'N/A'}' for your review.") # LLM_REQUEST
self.Kernel_CurrentMH_ID = "AWAIT_USER_INPUT"
return self.kernel_run_current_mh() # Kernel pauses and awaits general directive (generates LLM_REQUEST)
# Handle other MH completion statuses or errors from MHs
elif "Error" in status or "Failed" in status: # Generic error handling from MHs
self.CCO_data = self.LogToCCOHistory_v3_0(self.CCO_data or {"operational_log_cco_json":"[]"}, "MH_Error", f"{mh_that_just_ran_id} reported error status: {status}", mh_result_obj_from_mh_step.get("details_for_log"))
self.PresentUserMessage_v3_0("Error", f"Meta-Heuristic {mh_that_just_ran_id} encountered an issue: {status}.") # LLM_REQUEST
self.Kernel_CurrentMH_ID = "AWAIT_USER_INPUT"
return self.kernel_run_current_mh() # Pause and ask user what to do
else: # Default for other statuses or unhandled MH transitions
self.CCO_data = self.LogToCCOHistory_v3_0(self.CCO_data or {"operational_log_cco_json":"[]"}, "MH_Event", f"{mh_that_just_ran_id} returned unhandled status: {status}", mh_result_obj_from_mh_step.get("details_for_log"))
self.PresentUserMessage_v3_0("Info", f"Process step with {mh_that_just_ran_id} resulted in status: {status}.") # LLM_REQUEST
self.Kernel_CurrentMH_ID = "AWAIT_USER_INPUT"
return self.kernel_run_current_mh() # Pause
def kernel_process_general_user_directive(self, llm_interaction_result_obj): # expects full obj {'status':'USER_COMMAND', 'command': 'text...'}
self.current_context_mh = "Kernel"
user_directive_text = llm_interaction_result_obj.get("command", "") # Extract text from 'command' key
self.aios_log(self.current_context_mh, f"Processing general user directive: '{user_directive_text}'")
# Use the engine's own interpretation function
interp_result_obj = self.fn_interpret_user_directive_for_next_mh_v3(user_directive_text) # Returns Python dict
self.Kernel_CurrentMH_ID = interp_result_obj.get("next_mh_id", "AWAIT_USER_INPUT")
self.Kernel_MH_Inputs_JsonString = interp_result_obj.get("next_mh_inputs_json", "{}") # Is JSON string
if self.Kernel_CurrentMH_ID == "AWAIT_USER_INPUT" and interp_result_obj.get("user_prompt_message"):
# Interpretation wants to re-prompt more specifically
return self._create_llm_request( # Generate LLM_REQUEST
task_type="USER_INPUT_REQUIRED_GENERAL_DIRECTIVE",
prompt_to_user=interp_result_obj.get("user_prompt_message"),
continuation_hint="engine.kernel_process_general_user_directive(llm_interaction_result_obj)" # Loop back if needed
)
return self.kernel_run_current_mh() # Proceed to run MH or re-prompt if AWAIT_USER_INPUT without message
# --- Meta-Heuristic: IFE-MH (Idea Formulation & Exploration) ---
# Broken into resumable steps. Each step might return an LLM_REQUEST or directly call the next step if no LLM interaction is needed.
# `_ife_s` dictionary (instance variable) will store IFE's internal state across these resumable steps.
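    # Typical IFE round-trips (each step ends in an LLM_REQUEST unless noted):
    #   step1_get_core_idea         -> elicit the core idea from the user
    #   step2_process_core_idea     -> initialize the CCO if needed, request an essence draft
    #   step3_process_essence_draft -> request MRO refinement of the draft
    #   step4_finalize_essence      -> store essence in CCO, hand result to the Kernel (no request)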
def run_mh_ife_step1_get_core_idea(self, mh_inputs_obj): # Expects Python dict for mh_inputs_obj
self.current_context_mh = "IFE-MH"
self.aios_log(self.current_context_mh, f"IFE-MH (v{self.engine_version_short}) Step 1: Initiated. Getting core idea.")
        # PresentUserMessage_v3_0 also returns an LLM_REQUEST, which the orchestrator would have to
        # handle before the actual input request from fn_interaction_elicit_user_input_v3 below.
        # To keep the flow manageable, each engine method should return ONE primary LLM_REQUEST that
        # needs action; the PresentUserMessage call here is informational, and the elicitation below
        # is the request that matters.
self.PresentUserMessage_v3_0("Status", f"IFE-MH (v{self.engine_version_short}): Starting Idea Formulation & Exploration...") # Logged, LLM can show
self._ife_s = { # Internal state for this IFE run, persists across LLM calls for IFE
"UserPromptText_from_kernel": mh_inputs_obj.get("user_initial_prompt_text") if mh_inputs_obj else None,
"CCO_JsonString_from_kernel": mh_inputs_obj.get("existing_cco_json_if_reexploring") if mh_inputs_obj else self.Kernel_ActiveCCO_JsonString,
# These will be populated by subsequent steps if this IFE instance continues
"user_provided_core_idea": None,
"drafted_essence_json_str_for_mro": None, # From content_draft_text_segment before MRO
"refined_essence_json_str_for_cco": None # From MRO, ready for CCO update
}
# Initialize self.CCO_data (Python dict) from kernel state or keep as None
if self._ife_s["CCO_JsonString_from_kernel"]:
self.CCO_data = self.ParseJsonToCNLObject(self._ife_s["CCO_JsonString_from_kernel"])
else:
self.CCO_data = None
if self._ife_s["UserPromptText_from_kernel"]:
self.aios_log(self.current_context_mh, "Core idea already provided via Kernel input. Processing directly.")
# Simulate it coming from LLM interaction for consistent flow into next step
llm_like_result_for_direct_text = {"status":"USER_COMMAND", "command":self._ife_s["UserPromptText_from_kernel"], "user_text": self._ife_s["UserPromptText_from_kernel"]}
return self.run_mh_ife_step2_process_core_idea(llm_like_result_for_direct_text) # Call next step directly
else:
self.aios_log(self.current_context_mh, "Core idea not provided, requesting LLM to elicit from user.")
return self.fn_interaction_elicit_user_input_v3( # This returns an LLM_REQUEST
prompt_message_to_user="What is the core idea or problem you'd like to explore for this new AIOS process?"
# Orchestrator should use continuation_hint: engine.run_mh_ife_step2_process_core_idea(llm_interaction_result_obj)
)
def run_mh_ife_step2_process_core_idea(self, llm_interaction_result_obj): # Expects object like {'status':'USER_COMMAND', 'command':'text...'}
self.current_context_mh = "IFE-MH"
# Result obj has "command" and "user_text". "command" is primary payload.
user_provided_text = llm_interaction_result_obj.get("command")
self.aios_log(self.current_context_mh, f"IFE-MH Step 2: Received user's core idea: '{user_provided_text}'")
if not user_provided_text: # Check if command (text from user) is empty
self.PresentUserMessage_v3_0("Error", "IFE-MH: Core idea is essential and was not provided. Cannot proceed.")
# Return dict for kernel_process_mh_result
return {"status": "IFE_Failed_NoCoreIdeaProvided", "updated_cco_json": self.ConvertCNLObjectToJson(self.CCO_data)}
self._ife_s["user_provided_core_idea"] = user_provided_text
# Initialize CCO if none exists
if self.CCO_data is None: # Check Python dict form
self.aios_log(self.current_context_mh, "IFE-MH: No CCO exists, initializing new CCO.")
id_result_obj = self.fn_utility_generate_unique_id_v3(id_prefix="aios_cco_") # Returns dict
new_cco_id = id_result_obj.get("unique_id", f"fallback_cco_{uuid.uuid4().hex[:8]}")
prompt_summary = self._ife_s["user_provided_core_idea"]
if len(prompt_summary) > 50: prompt_summary = prompt_summary[:47] + "..."
# Construct CCO based on `AIOS_CCO_Schema_v3_0` from spec (I.A of AIOS_Engine_Bootstrap.md)
self.CCO_data = { # Python dict form
"cco_id": new_cco_id, "parent_cco_id": None,
"metadata_internal_cco": {
"name_label": f"AIOS CCO for: {prompt_summary}", "current_form": "Initial Idea Exploration (IFE)",
"target_product_form_descriptor": None, # To be defined later
"schema_version_used": f"AIOS_CCO_Schema_v3_0 (Engine v{self.engine_version_short})",
"engine_version_context": self.engine_version_full,
"user_provided_creation_date_context": self._get_timestamp(), # Use helper
"user_provided_last_modified_date_context": self._get_timestamp(),
"tags_keywords": [kw.strip() for kw in self._ife_s["user_provided_core_idea"].split(" ")[:3] if kw.strip()], # Example
"current_phase_id": "IFE_Active", "phase_history_json": self.ConvertCNLObjectToJson([]),
},
"core_essence_json": self.ConvertCNLObjectToJson(None), # Stores JSON string "null"
"initiating_document_scaled_json": self.ConvertCNLObjectToJson({"user_prompt": self._ife_s["user_provided_core_idea"]}),
"plan_structured_json": self.ConvertCNLObjectToJson(None),
"product_content_data_json": self.ConvertCNLObjectToJson(None),
"knowledge_artifacts_contextual_json": self.ConvertCNLObjectToJson({"conceptual_anchors_cco": []}), # Empty list
"execution_log_detailed_json": self.ConvertCNLObjectToJson(None),
"operational_log_cco_json": self.ConvertCNLObjectToJson([]), # Empty list
"associated_data_json": self.ConvertCNLObjectToJson(None),
"open_seeds_exploration_json": self.ConvertCNLObjectToJson(None)
}
self.CCO_data = self.LogToCCOHistory_v3_0(self.CCO_data, "IFE_MH_Event", "New CCO initialized by IFE-MH.", {"new_cco_id": new_cco_id}) # CCO_data updated by Log
self.PresentUserMessage_v3_0("Info", f"IFE-MH: New CCO created with ID: {new_cco_id}. CCO is now active.")
else: # CCO exists
self.aios_log(self.current_context_mh, f"IFE-MH: Using existing CCO: {self.CCO_data.get('cco_id', 'ID N/A')}")
self.CCO_data = self.LogToCCOHistory_v3_0(self.CCO_data, "IFE_MH_Event", f"IFE processing with core idea: '{self._ife_s['user_provided_core_idea']}'", {"prompt": self._ife_s['user_provided_core_idea']})
self.Kernel_ActiveCCO_JsonString = self.ConvertCNLObjectToJson(self.CCO_data) # Update engine's string CCO
# Step 2b: Draft Core Essence (Request LLM)
self.aios_log(self.current_context_mh, "IFE-MH Step 2b: Requesting LLM to draft core essence.")
draft_context_obj = { # Python dict to be passed to LLM cognitive task
"user_provided_idea_text": self._ife_s["user_provided_core_idea"],
"cco_id": self.CCO_data.get("cco_id"),
"current_phase_id": self.CCO_data.get("metadata_internal_cco",{}).get("current_phase_id"),
"engine_guidance": f"Focus on transformative potential. Adhere to AIOS v{self.engine_version_short} quality principles (TID_ASO_META_001, 002, 005 context if available)."
}
draft_instructions = ("Based on the user's idea, draft a 'core_essence_text' (1-3 impactful sentences). "
"This statement should clearly articulate the central theme and purpose. It will be stored in the CCO's 'core_essence_json' field (as a JSON string containing this object). "
"The text must be a string. Format your output as a JSON object: {\"core_essence_text\": \"<your_drafted_essence_string>\", \"status\": \"DraftComplete\"}")
return self.fn_content_draft_text_segment_v3( # This returns an LLM_REQUEST
instructions=draft_instructions, context_obj=draft_context_obj,
desired_length_hint="1-3 sentences, highly impactful", rhetorical_goal_hint="summarize_inspire_define_core_essence",
output_key_name="core_essence_text"
# Orchestrator will call 'engine.run_mh_ife_step3_process_essence_draft(llm_cognitive_result)'
)
def run_mh_ife_step3_process_essence_draft(self, llm_cognitive_result): # Expects result like {'core_essence_text': '...', 'status': 'DraftComplete'}
self.current_context_mh = "IFE-MH"
self.aios_log(self.current_context_mh, f"IFE-MH Step 3: Received LLM draft for core essence.")
if not llm_cognitive_result or llm_cognitive_result.get("status") != "DraftComplete" or not llm_cognitive_result.get("core_essence_text"):
self.PresentUserMessage_v3_0("Error", "IFE-MH: Failed to get valid core essence draft from LLM. Using placeholder.")
# Fallback: create a placeholder if LLM fails to provide valid essence draft
llm_cognitive_result = {"core_essence_text": "Placeholder essence due to drafting error from LLM.", "status":"ErrorFallback_Draft"}
# `llm_cognitive_result` is the Python dict: {"core_essence_text": "...", "status": "DraftComplete"}
# This needs to be stringified for MRO, which expects a JSON string input for `draft_content_json_str`
self._ife_s["drafted_essence_json_str_for_mro"] = self.ConvertCNLObjectToJson(llm_cognitive_result)
self.aios_log(self.current_context_mh, "IFE-MH Step 3a: Requesting LLM to refine core essence (simulated MRO).")
refinement_goals_obj = { # Python dict for MRO goals
# `quality_criteria_json` is a string containing JSON as per Autologos
"quality_criteria_json": self.ConvertCNLObjectToJson({
f"AIOS_v{self.engine_version_short}_TransformativeValueNoveltyFocus": True,
f"AIOS_v{self.engine_version_short}_InformationDensityConcisenessFocus": True,
"ClarityAndImpact": True,
"TargetProductForm": "Core Essence for CCO (object with 'core_essence_text')" # Hint for MRO/LLM
}),
"critique_focus_hint": "Maximize Transformative Value of the 'core_essence_text'. Ensure output is a JSON string containing this object: {'core_essence_text': 'refined text...', 'status':'Refined' (or 'Success_Converged') }.",
# No schema validation for this particular text blob object
"requires_schema_validation": False
}
return self.fn_mro_RefineOutput_Pipeline_v3_0( # This returns an LLM_REQUEST
draft_content_json_str=self._ife_s["drafted_essence_json_str_for_mro"], # e.g. '{"core_essence_text": "draft..."}'
refinement_goals_obj=refinement_goals_obj, # Python dict
cco_context_json_str=self.ConvertCNLObjectToJson(self.CCO_data) # MRO also needs CCO context as JSON string
# Orchestrator will call 'engine.run_mh_ife_step4_finalize_essence(llm_cognitive_result)'
)
def run_mh_ife_step4_finalize_essence(self, mro_cognitive_result): # Expects MRO output structure (Python dict)
self.current_context_mh = "IFE-MH"
self.aios_log(self.current_context_mh, f"IFE-MH Step 4: Received MRO result for core essence.")
if not mro_cognitive_result or mro_cognitive_result.get("status") not in ["Success_Converged", "Success_MaxIterationsReached"] or not mro_cognitive_result.get("refined_output_json"):
self.PresentUserMessage_v3_0("Warning", "IFE-MH: MRO did not successfully refine core essence. Using previous draft (if available) or placeholder.")
            # refined_output_json from MRO is *already* a JSON string, e.g. '{"core_essence_text": "refined...", "status":"Refined"}'
            # Guard with (mro_cognitive_result or {}) so a missing result falls back cleanly.
            self._ife_s["refined_essence_json_str_for_cco"] = (mro_cognitive_result or {}).get(
                "refined_output_json",
                self._ife_s.get("drafted_essence_json_str_for_mro", self.ConvertCNLObjectToJson({"core_essence_text":"Error in MRO, no essence available."}))
            )
else:
self._ife_s["refined_essence_json_str_for_cco"] = mro_cognitive_result.get("refined_output_json")
# Update CCO.core_essence_json field with the JSON string from MRO.
self.CCO_data = self.fn_data_update_cco_section_v3(self.CCO_data, "core_essence_json", self._ife_s["refined_essence_json_str_for_cco"])
        mro_status_str = (mro_cognitive_result or {}).get("status", "N/A")
        self.CCO_data = self.LogToCCOHistory_v3_0(self.CCO_data, "IFE_MH_Event",
                                                  f"Core essence finalized. MRO Status: {mro_status_str}",
                                                  self.ParseJsonToCNLObject(self._ife_s["refined_essence_json_str_for_cco"])) # Log the parsed object
self.aios_log(self.current_context_mh, "IFE-MH Concluded successfully.")
self.PresentUserMessage_v3_0("Status", "IFE-MH: Idea Exploration complete. Core essence is now in CCO.")
final_ife_result_to_kernel = { # Python object for kernel processing
"status": "IFE_ExplorationComplete_ReadyForNext", # Kernel uses this to decide next MH
"updated_cco_json": self.ConvertCNLObjectToJson(self.CCO_data), # Pass full CCO JSON string
"details_for_log": {"summary": f"IFE-MH finished for CCO ID: {self.CCO_data.get('cco_id') if self.CCO_data else 'N/A'}. Refined essence (JSON str): {self._ife_s['refined_essence_json_str_for_cco'][:100]}..."}
}
# This result is for the Kernel to process, no more LLM_REQUEST from *this* IFE flow.
# The kernel_process_mh_result will handle this and decide the next step (e.g., CAG or AWAIT_USER_INPUT).
return self.kernel_process_mh_result("IFE-MH", final_ife_result_to_kernel)
# --- Simplified CAG-MH (Content Auto-Generation) Steps ---
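    # Simplified CAG round-trips, mirroring the IFE pattern:
    #   step1_prepare        -> request a draft of the introduction from the core essence
    #   step2_process_draft  -> request MRO refinement of that draft
    #   step3_finalize_draft -> append refined section to product_content_data_json, hand result to Kernel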
def run_mh_cag_simplified_draft_step1_prepare(self, mh_inputs_obj): # Python dict for inputs
self.current_context_mh = "CAG-MH_SIMPLIFIED_DRAFT"
self.aios_log(self.current_context_mh, "CAG-MH Step 1: Initiated. Preparing to draft section.")
self.PresentUserMessage_v3_0("Status", f"CAG-MH (v{self.engine_version_short}): Starting simplified content drafting...")
# Update self.CCO_data (Python dict) if full JSON string was passed from Kernel (as it was just updated by IFE)
cco_json_str_from_input = mh_inputs_obj.get("current_cco_json", self.Kernel_ActiveCCO_JsonString)
self.CCO_data = self.ParseJsonToCNLObject(cco_json_str_from_input)
if not self.CCO_data or not self.CCO_data.get("core_essence_json") or self.CCO_data.get("core_essence_json") == "null": # check actual content
self.PresentUserMessage_v3_0("Error", "CAG-MH: CCO or its core_essence_json is missing/invalid. Cannot draft.")
# Kernel will process this error status.
return self.kernel_process_mh_result("CAG-MH_SIMPLIFIED_DRAFT", {"status": "CAG_Error_MissingCoreEssence", "updated_cco_json": self.ConvertCNLObjectToJson(self.CCO_data)})
core_essence_container_str = self.CCO_data.get("core_essence_json", "{}") # This is a JSON string e.g. '{"core_essence_text": "..."}'
core_essence_obj = self.ParseJsonToCNLObject(core_essence_container_str) # This is Python dict e.g. {"core_essence_text": "..."}
core_essence_text_for_prompt = core_essence_obj.get("core_essence_text", "The defined core idea.") # Extract the actual text
self.PresentUserMessage_v3_0("Info", f"CAG-MH: Will draft initial section expanding on essence: '{core_essence_text_for_prompt}'")
# Prepare context for LLM to draft content
draft_context_obj = {
"core_essence_statement": core_essence_text_for_prompt, # Pass the actual text string
"cco_id": self.CCO_data.get("cco_id"),
# Example of fetching conceptual anchors:
"conceptual_anchors_from_cco": self.ParseJsonToCNLObject(self.CCO_data.get("knowledge_artifacts_contextual_json", "{}")).get("conceptual_anchors_cco", ["default_anchor_if_empty"]),
"target_document_part": "Introduction"
}
draft_instructions = ("Draft an introductory section for a document, based on the provided core_essence_statement. "
"This section should elaborate on the essence, outline key aspects, and engage the reader. "
f"Apply AIOS v{self.engine_version_short} principles: aim for 'Transformative Value' and 'Information Density'. "
"Proactively integrate conceptual_anchors_from_cco if they seem relevant to the topic.")
return self.fn_content_draft_text_segment_v3( # Returns LLM_REQUEST
instructions=draft_instructions, context_obj=draft_context_obj,
desired_length_hint="1-2 detailed paragraphs", rhetorical_goal_hint="document_introduction_and_overview",
output_key_name="section_draft_text" # Output from LLM will have this key for the text
# Orchestrator to call 'engine.run_mh_cag_simplified_draft_step2_process_draft(llm_cognitive_result)'
)
def run_mh_cag_simplified_draft_step2_process_draft(self, llm_cognitive_result): # Python dict like {"section_draft_text": "...", "status": "DraftComplete"}
self.current_context_mh = "CAG-MH_SIMPLIFIED_DRAFT"
self.aios_log(self.current_context_mh, f"CAG-MH Step 2: Received LLM draft for section.")
        if not llm_cognitive_result or llm_cognitive_result.get("status") != "DraftComplete" or not llm_cognitive_result.get("section_draft_text"):
self.PresentUserMessage_v3_0("Error", "CAG-MH: Failed to get valid section draft. Using placeholder.")
llm_cognitive_result = {"section_draft_text":"Placeholder section due to drafting error.","status":"ErrorFallback_Draft"}
# Content to refine for MRO. MRO expects a JSON string of an object like '{"text_to_refine": "..."}'
# The output key from fn_content_draft_text_segment was "section_draft_text".
text_for_mro_input = llm_cognitive_result.get("section_draft_text")
self._cag_s["draft_section_json_str_for_mro"] = self.ConvertCNLObjectToJson({"text_to_refine": text_for_mro_input})
self.aios_log(self.current_context_mh, "CAG-MH Step 2a: Requesting LLM to refine section draft (simulated MRO).")
refinement_goals_obj = { # Python dict
"target_product_form_descriptor": "Introductory Document Section",
"quality_criteria_json": self.ConvertCNLObjectToJson({ # This part is a JSON string
f"AIOS_v{self.engine_version_short}_TransformativeValueNoveltyFocus": True,
f"AIOS_v{self.engine_version_short}_InformationDensityEfficientComplexity": True,
"ClarityAndEngagement": True, "LogicalFlow": True, "AdherenceToRhetoricalGoal": "introduction_and_frame_document"
}),
"critique_focus_hint": "Refine for compelling narrative, logical structure, and maximum insight per word. Ensure proactive integration of themes/anchors from CCO context (provided to MRO).",
}
return self.fn_mro_RefineOutput_Pipeline_v3_0( # Returns LLM_REQUEST
draft_content_json_str=self._cag_s["draft_section_json_str_for_mro"], # e.g., '{"text_to_refine": "draft..."}'
refinement_goals_obj=refinement_goals_obj, # Python dict
cco_context_json_str=self.ConvertCNLObjectToJson(self.CCO_data) # MRO also needs CCO context as JSON string
# Orchestrator to call 'engine.run_mh_cag_simplified_draft_step3_finalize_draft(llm_cognitive_result)'
)
def run_mh_cag_simplified_draft_step3_finalize_draft(self, mro_cognitive_result): # Python dict from MRO sim
self.current_context_mh = "CAG-MH_SIMPLIFIED_DRAFT"
self.aios_log(self.current_context_mh, f"CAG-MH Step 3: Received MRO result for section.")
if not mro_cognitive_result or mro_cognitive_result.get("status") not in ["Success_Converged", "Success_MaxIterationsReached"] or not mro_cognitive_result.get("refined_output_json"):
self.PresentUserMessage_v3_0("Warning", "CAG-MH: MRO did not successfully refine section. Using pre-MRO draft or placeholder.")
# refined_output_json from MRO is a string '{"text_to_refine":"..."}'
            refined_section_obj_json_str = (mro_cognitive_result or {}).get(
"refined_output_json",
self._cag_s.get("draft_section_json_str_for_mro", self.ConvertCNLObjectToJson({"text_to_refine":"Error in MRO for section."}))
)
else:
refined_section_obj_json_str = mro_cognitive_result.get("refined_output_json")
        # Update CCO's product_content_data_json. This is simplified: the CCO schema stores this
        # field as a STRING containing JSON. Here that string holds an object with a
        # 'document_sections' list, and the new refined section entry is appended to that list.
product_data_dict = self.ParseJsonToCNLObject(self.CCO_data.get("product_content_data_json", "{}"))
if not isinstance(product_data_dict, dict): product_data_dict = {} # ensure it's a dict
if "document_sections" not in product_data_dict or not isinstance(product_data_dict["document_sections"], list):
product_data_dict["document_sections"] = []
new_section_entry_obj = { # This Python object represents one section
"section_title": "Introduction (Draft 1, Refined via MRO)",
"content_type": "paragraph_text_block",
"content_details_json_str": refined_section_obj_json_str, # Store the '{"text_to_refine":"..."}' string
"mro_refinement_summary_json_str": mro_cognitive_result.get("refinement_summary_json") # Store summary JSON string
}
product_data_dict["document_sections"].append(new_section_entry_obj) # Add to list
# The 'product_content_data_json' field in CCO must be a STRING that is valid JSON.
# So we convert the product_data_dict (which now contains the new section) back to a JSON string.
self.CCO_data = self.fn_data_update_cco_section_v3(self.CCO_data, "product_content_data_json", self.ConvertCNLObjectToJson(product_data_dict))
self.CCO_data = self.LogToCCOHistory_v3_0(self.CCO_data, "CAG_MH_Event", "Introductory section drafted and refined by MRO.", self.ParseJsonToCNLObject(refined_section_obj_json_str))
self.PresentUserMessage_v3_0("Status", "CAG-MH (Simplified): Introductory draft complete, refined, and stored in CCO.")
# For presenting to user, extract the actual text from refined_section_obj_json_str
refined_text_obj = self.ParseJsonToCNLObject(refined_section_obj_json_str) # Parse '{"text_to_refine":"..."}'
final_text_for_user = refined_text_obj.get("text_to_refine", "Error displaying draft content.")
# This PresentUserMessage will generate an LLM_REQUEST.
# The orchestrator needs to handle this, then the function returns to kernel_process_mh_result.
self.PresentUserMessage_v3_0("DraftOutput_ForReview", {
"section_title": "Draft Introduction (Refined)",
"content_text": final_text_for_user, # Present the actual text string
"notes": f"This draft has been processed by MRO for AIOS v{self.engine_version_short} quality principles."
})
        # Hand the final result to the Kernel (matching the IFE-MH and CAG error-path pattern),
        # so the Kernel decides the next step (here: pause and await a user directive).
        return self.kernel_process_mh_result("CAG-MH_SIMPLIFIED_DRAFT", {
            "status": "CAG_DraftComplete_UserReview",
            "updated_cco_json": self.ConvertCNLObjectToJson(self.CCO_data),
            "details_for_log": {"summary": f"CAG-MH simplified draft for CCO ID '{self.CCO_data.get('cco_id') if self.CCO_data else 'N/A'}' complete and presented."}
        })
# --- Orchestrator Guidance Comments ---
# This script defines the AIOS_Engine_v3_2_1 class.
# To run the engine, an orchestrating LLM (like the one in Google AI Studio) should:
# 1. Create an instance of this class in a Python code execution cell:
# `engine = AIOS_Engine_v3_2_1()`
#
# 2. Start the engine process by calling its entry point method. For example, to start the Kernel:
# `current_request_obj = engine.start_engine()`
# This call will execute some internal logic and then print an "LLM_REQUEST" block to the output.
# The `current_request_obj` variable will also hold this request dictionary.
#
# 3. The orchestrating LLM must then:
# a. Read the "LLM_REQUEST" from the code execution output (or the `current_request_obj` variable).
# b. Perform the task specified in the request:
# - If `task_type` is "USER_INPUT_REQUIRED...", the LLM interacts with the end-user (in chat)
# to get the specified input (e.g., present options and get a choice, or elicit free text).
# - If `task_type` is "COGNITIVE_TASK_REQUIRED...", the LLM uses its own capabilities
# to perform the described cognitive task (e.g., draft text, refine content, analyze data).
# - If `task_type` is "PRESENT_USER_MESSAGE_TO_USER", the LLM presents the message to the user.
# The engine logic might continue after this, or this might be the last action of a step.
#
# c. Construct a Python dictionary containing the result of its action. The format of this result
# is guided by the `expected_input_description_for_continuation` field in the LLM_REQUEST.
# For example, for user input, it's often:
# `llm_provided_result = {"status": "USER_COMMAND", "command": "User's actual text or chosen option value"}`
# For a cognitive task, it might be:
# `llm_provided_result = {"core_essence_text": "The drafted essence...", "status": "DraftComplete"}`
#
# 4. Call the appropriate continuation method on the *same engine instance*, passing the result dictionary.
# The `continuation_hint_for_orchestrator` field in the LLM_REQUEST block will suggest which
# engine method to call next (e.g., `engine.kernel_process_initial_choice_result(llm_provided_result)`
# or `engine.run_mh_ife_step2_process_core_idea(llm_provided_result)`).
# `current_request_obj = engine.method_from_hint(llm_provided_result)`
#
# 5. Repeat steps 3 and 4. Each call to an engine method will execute a segment of AIOS logic
# and will typically result in a new LLM_REQUEST (if further interaction or cognitive work is needed)
# or a final status object if a process/MH completes or terminates.
#
# The `engine` object (AIOS_Engine_v3_2_1 instance) maintains the state (like CCO_data, Kernel_CurrentMH_ID)
# across these separate Python cell executions / method calls, because the *same instance* is used by the orchestrator.
#
# This script provides the AIOS engine's *orchestration logic*; the orchestrating LLM provides the
# *execution environment for this script* and performs the user interactions and cognitive heavy-lifting.
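#
# A minimal sketch of one orchestration round-trip (method names come from the continuation
# hints above; the user's reply is hypothetical):
#
#   engine = AIOS_Engine_v3_2_1()
#   request = engine.start_engine()  # prints LLM_REQUEST asking to present the initial options
#   # ... orchestrating LLM shows the options, user picks "New Process" ...
#   user_result = {"status": "USER_COMMAND", "command": "New Process"}
#   request = engine.kernel_process_initial_choice_result(user_result)
#   # prints the next LLM_REQUEST (IFE-MH step 1: elicit the core idea), and so on.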
if __name__ == "__main__":
# This block is for illustrative purposes if you were to save and run this script directly
# to see its initial log messages. It won't be interactive here without a human manually
# simulating the orchestrating LLM's role.
print(f"AIOS_Engine_v3_2_1.py - Python Orchestration Logic for AIOS v{AIOS_Engine_v3_2_1().engine_version_short}")
print("This script defines the AIOS_Engine_v3_2_1 class.")
print("It is designed to be orchestrated by an LLM with code execution capabilities.")
print("See the comments at the end of the file for 'Orchestrator Guidance'.")
# Example of instantiating for manual testing (non-orchestrated, first step only):
# print("\n--- SIMULATING ORCHESTRATOR START ---")
# engine = AIOS_Engine_v3_2_1()
# print("\nCalling engine.start_engine()...")
# initial_request = engine.start_engine()
# print(f"\nInitial LLM_REQUEST object returned to orchestrator would be (details were printed above by the call itself):")
# print(json.dumps(initial_request, indent=2))
# print("\n--- SIMULATION END ---")