# IO Simulation v2.1 Code (1D, P_target Adaptation Mechanism 2)

## 1. Objective

This node provides the updated Python code for the 1D Information Dynamics (IO) simulation. This version (v2.1) implements the revised `P_target` update logic based on **Mechanism 2 (Gradual Adaptation to Context)** as specified in [[releases/archive/Information Ontology 1/0111_P_target_Dynamics_v2]]. It supersedes the logic used in the code presented in [[releases/archive/Information Ontology 1/0107_IO_Simulation_v2_1D_Implementation]]. The rest of the simulation structure (the Η, Θ, K, and M influences on `P_change` and on target selection) remains the same as defined in [[releases/archive/Information Ontology 1/0104_IO_Formalism_v2_Summary]] and implemented in [[releases/archive/Information Ontology 1/0107_IO_Simulation_v2_1D_Implementation]].

## 2. Key Changes from v2.0 Code ([[0107]])

* The `P_target` update logic within the main loop (inside the `if len(stable_indices) > 0:` and `if len(changing_indices) > 0:` blocks) is replaced with the new rules from [[releases/archive/Information Ontology 1/0111_P_target_Dynamics_v2]]: each node's `P_target` is first reinforced (if the node kept its state) or reset to uniform (if it changed), and then relaxed toward the local contextual distribution. A worked single-node example follows this list.
* A new parameter `lambda_adapt`, the rate of adaptation toward context, is introduced.
* The helper functions are otherwise unchanged, except that `normalize_p_target` now takes `p_min` as an explicit argument, so the value supplied in `params` (rather than the module-level placeholder) is the one applied.
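For concreteness, here is a minimal, self-contained sketch of the Mechanism 2 update for a single node that kept its state. The values of `delta_p_inc` and `lambda_adapt` match the placeholder defaults used below; the `p_min` floor and the final renormalization performed by `normalize_p_target` are omitted for brevity:

```python
import numpy as np

# Illustrative single-node example (not part of the simulation module).
delta_p_inc = 0.01    # base reinforcement rate
lambda_adapt = 0.01   # adaptation rate toward context

p_prev = np.array([0.7, 0.3])     # node's previous P_target over states (0, 1)
p_context = np.array([0.5, 0.5])  # distribution derived from neighbor states

# The node stayed in state 0 this step: reinforce state 0...
p_intermediate = p_prev + np.array([delta_p_inc, -delta_p_inc])
# ...then relax toward context: P_target = (1 - lambda)*P_intermediate + lambda*P_context
p_new = (1.0 - lambda_adapt) * p_intermediate + lambda_adapt * p_context
print(p_new)  # approx. [0.7079 0.2921]
```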
## 3. Python Code (v2.1)

```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import io
import base64

# --- Parameters ---
# These will be set specifically in execution nodes (e.g., 0113).
# Example placeholder values:
N = 200
S_max = 500
h = 0.5
alpha = 0.1
gamma = 1.0
p_M = 0.25
delta_theta_inc = 0.05
theta_max = 5.0
theta_base = 0.0
delta_p_inc = 0.01    # Base reinforcement rate for P_target
lambda_adapt = 0.01   # Adaptation rate for P_target towards context (NEW)
p_min = 1e-9

# --- Helper Functions (as in 0107, with p_min passed explicitly to normalize_p_target) ---
def normalize_p_target(p_target_array, p_min):
    """Normalizes P_target rows and applies the p_min floor."""
    if p_target_array.ndim == 1:
        p_target_array = p_target_array.reshape(1, -1)
    p_target_array = np.maximum(p_target_array, p_min)
    row_sums = p_target_array.sum(axis=1)
    row_sums[row_sums == 0] = 1
    p_target_array = p_target_array / row_sums[:, np.newaxis]
    return p_target_array

def calculate_k_local(epsilon_state):
    """Calculates local contrast K based on immediate neighbors."""
    neighbors_left = np.roll(epsilon_state, 1)
    neighbors_right = np.roll(epsilon_state, -1)
    k_local = 0.5 * (np.abs(epsilon_state - neighbors_left) + np.abs(epsilon_state - neighbors_right))
    return k_local

def f_H(h_param, p_leave_array):
    """Η drive function."""
    return np.clip(h_param * p_leave_array, 0.0, 1.0)

def f_Theta(theta_val_array, alpha_param):
    """Θ resistance function."""
    return 1.0 / (1.0 + alpha_param * theta_val_array)

def f_K(k_local_array, gamma_param):
    """K gating function."""
    return np.power(k_local_array, gamma_param)

# --- Simulation Function ---
def run_io_simulation_v2_1(params):
    """Runs the IO v2.1 simulation with given parameters."""
    N = params['N']
    S_max = params['S_max']
    h = params['h']
    alpha = params['alpha']
    gamma = params['gamma']
    p_M = params['p_M']
    delta_theta_inc = params['delta_theta_inc']
    theta_max = params['theta_max']
    theta_base = params['theta_base']
    delta_p_inc = params['delta_p_inc']
    lambda_adapt = params['lambda_adapt']
    p_min = params['p_min']
    seed = params.get('seed', None)  # Optional seed
    if seed is not None:
        np.random.seed(seed)
    else:
        np.random.seed()  # Use default seeding if none provided

    # Initialization
    epsilon_state = np.random.randint(0, 2, size=N)
    p_target_state = np.full((N, 2), 0.5)
    theta_state = np.zeros(N)
    epsilon_history = np.zeros((S_max, N), dtype=int)
    avg_theta_history = np.zeros(S_max)
    avg_ptarget_entropy_history = np.zeros(S_max)

    # --- Simulation Loop ---
    for S in range(S_max):
        epsilon_history[S, :] = epsilon_state
        prev_epsilon = epsilon_state.copy()
        prev_p_target = p_target_state.copy()
        prev_theta = theta_state.copy()

        # --- Phase 1: Calculate Influences ---
        k_local = calculate_k_local(prev_epsilon)
        neighbors_left_eps = np.roll(prev_epsilon, 1)
        neighbors_right_eps = np.roll(prev_epsilon, -1)
        influence_0 = (neighbors_left_eps == 0).astype(int) + (neighbors_right_eps == 0).astype(int)
        influence_1 = (neighbors_left_eps == 1).astype(int) + (neighbors_right_eps == 1).astype(int)
        total_causal_weight = influence_0 + influence_1  # = 2 for this ring setup

        # --- Phase 2: Determine Probability of State Change ---
        p_leave = 1.0 - prev_p_target[np.arange(N), prev_epsilon]
        prob_H_driven = f_H(h, p_leave)
        prob_Theta_resisted = f_Theta(prev_theta, alpha)
        prob_K_gated = f_K(k_local, gamma)
        P_change = prob_H_driven * prob_Theta_resisted * prob_K_gated

        # --- Phase 3: Execute State Transition ---
        r_change = np.random.rand(N)
        change_mask = r_change < P_change
        no_change_mask = ~change_mask

        # --- Initialize next step states ---
        epsilon_state = prev_epsilon.copy()  # Start assuming no change
        theta_state = prev_theta.copy()
        p_target_state = prev_p_target.copy()
        p_intermediate = np.zeros_like(p_target_state)  # For P_target updates

        # --- Update nodes that change state ---
        changing_indices = np.where(change_mask)[0]
        if len(changing_indices) > 0:
            # Determine target state (M bias via CA)
            p_target_0_intrinsic = prev_p_target[changing_indices, 0]
            p_target_1_intrinsic = prev_p_target[changing_indices, 1]
            mod_factor_0 = (1 + p_M * influence_0[changing_indices] / 2.0)
            mod_factor_1 = (1 + p_M * influence_1[changing_indices] / 2.0)
            p_prime_0 = p_target_0_intrinsic * mod_factor_0
            p_prime_1 = p_target_1_intrinsic * mod_factor_1
            sum_p_prime = p_prime_0 + p_prime_1
            sum_p_prime[sum_p_prime == 0] = 1
            P_modified_target_0 = p_prime_0 / sum_p_prime
            r_target = np.random.rand(len(changing_indices))
            new_epsilon_for_changing = (r_target >= P_modified_target_0).astype(int)
            epsilon_state[changing_indices] = new_epsilon_for_changing

            # Update Theta (reset)
            theta_state[changing_indices] = theta_base

            # Update P_target (Mechanism 2, reset part):
            # set P_intermediate to the uniform baseline for changed nodes
            p_intermediate[changing_indices, :] = 0.5

        # --- Update nodes that do NOT change state ---
        stable_indices = np.where(no_change_mask)[0]
        if len(stable_indices) > 0:
            current_stable_eps = prev_epsilon[stable_indices]

            # Update Theta (increment)
            theta_state[stable_indices] = np.minimum(prev_theta[stable_indices] + delta_theta_inc, theta_max)

            # Update P_target (Mechanism 2, reinforce part):
            # calculate the reinforced intermediate P_target for stable nodes
            p_target_current = prev_p_target[stable_indices, current_stable_eps]
            p_target_other = prev_p_target[stable_indices, 1 - current_stable_eps]
            new_p_target_current = p_target_current + delta_p_inc
            new_p_target_other = p_target_other - delta_p_inc
            temp_p_target = prev_p_target[stable_indices, :].copy()  # Use previous as base
            temp_p_target[np.arange(len(stable_indices)), current_stable_eps] = new_p_target_current
            temp_p_target[np.arange(len(stable_indices)), 1 - current_stable_eps] = new_p_target_other
            # Normalize this intermediate step before adaptation
            p_intermediate[stable_indices, :] = normalize_p_target(temp_p_target, p_min)
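        # At this point p_intermediate is fully assigned: nodes that changed
        # were reset to the uniform baseline (0.5, 0.5) and stable nodes hold
        # their normalized reinforced distributions, so the adaptation step
        # below can blend it directly with the contextual target.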
        # --- Phase 3b/6b Combined: P_target Adaptation (Mechanism 2) ---
        # Calculate the contextual target distribution P_context for ALL nodes
        p_context = np.zeros_like(p_target_state)
        valid_ca = total_causal_weight > 0
        valid_indices = np.where(valid_ca)[0]
        if len(valid_indices) > 0:
            p_context[valid_indices, 0] = influence_0[valid_indices] / total_causal_weight[valid_indices]
            p_context[valid_indices, 1] = influence_1[valid_indices] / total_causal_weight[valid_indices]
        # Handle nodes with zero total causal weight (e.g., isolated nodes if not a ring)
        invalid_indices = np.where(~valid_ca)[0]
        if len(invalid_indices) > 0:
            p_context[invalid_indices, :] = 0.5  # Default to uniform if no input

        # Apply gradual adaptation: P_target = (1 - lambda)*P_intermediate + lambda*P_context
        p_target_state = (1.0 - lambda_adapt) * p_intermediate + lambda_adapt * p_context

        # Apply the Η floor (p_min) and final normalization
        p_target_state = normalize_p_target(p_target_state, p_min)

        # --- Phase 4: Update Causal Network (not implemented in this version) ---

        # --- Calculate Metrics for History ---
        avg_theta_history[S] = np.mean(theta_state)
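        # Binary Shannon entropy of each node's P_target, in bits: 1.0 for a
        # uniform (0.5, 0.5) row, approaching 0 as one outcome dominates; its
        # average tracks how sharply potentiality has concentrated.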
        p0 = p_target_state[:, 0]  # No need for max(p_min) here due to normalize_p_target
        p1 = p_target_state[:, 1]
        ptarget_entropy = -(p0 * np.log2(p0) + p1 * np.log2(p1))
        avg_ptarget_entropy_history[S] = np.mean(ptarget_entropy)

    # --- Prepare Results ---
    results = {
        "parameters": params,
        "epsilon_history": epsilon_history,
        "avg_theta_history": avg_theta_history,
        "avg_ptarget_entropy_history": avg_ptarget_entropy_history
    }
    return results


# --- Plotting Function ---
def plot_results(results, title_suffix=""):
    """Generates plots from simulation results and returns a base64-encoded PNG string."""
    epsilon_history = results["epsilon_history"]
    avg_theta_history = results["avg_theta_history"]
    avg_ptarget_entropy_history = results["avg_ptarget_entropy_history"]

    # No sharex: the top panel's x-axis is node index, the lower two are sequence step
    fig, axs = plt.subplots(3, 1, figsize=(10, 8))

    cmap = mcolors.ListedColormap(['black', 'white'])
    axs[0].imshow(epsilon_history, cmap=cmap, aspect='auto', interpolation='none')
    axs[0].set_title(f'IO v2.1 Simulation {title_suffix}: ε State Evolution')
    axs[0].set_xlabel('Node Index')
    axs[0].set_ylabel('Sequence Step (S)')

    axs[1].plot(avg_theta_history)
    axs[1].set_title('Average Stability (Θ_val)')
    axs[1].set_ylabel('Avg Θ_val')
    axs[1].grid(True)

    axs[2].plot(avg_ptarget_entropy_history)
    axs[2].set_title('Average Potentiality Entropy (H(P_target))')
    axs[2].set_xlabel('Sequence Step (S)')
    axs[2].set_ylabel('Avg Entropy (bits)')
    axs[2].grid(True)

    plt.tight_layout()
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    plot_base64 = base64.b64encode(buf.read()).decode('utf-8')
    buf.close()
    plt.close(fig)
    return plot_base64


# --- Example Usage (commented out - execution happens in separate nodes) ---
# params_run4 = {
#     'N': 200, 'S_max': 500, 'h': 0.5, 'alpha': 0.1, 'gamma': 1.0,
#     'p_M': 0.25, 'delta_theta_inc': 0.05, 'theta_max': 5.0,
#     'theta_base': 0.0, 'delta_p_inc': 0.01, 'lambda_adapt': 0.01,  # Added lambda_adapt
#     'p_min': 1e-9, 'seed': 42
# }
# results_run4 = run_io_simulation_v2_1(params_run4)
# plot_b64_run4 = plot_results(results_run4, title_suffix="(Run 4)")
# print(f"Run 4 Complete. Final Avg Theta: {results_run4['avg_theta_history'][-1]:f}, Final Avg P_target Entropy: {results_run4['avg_ptarget_entropy_history'][-1]:f}")
# print(f"Plot generated: {plot_b64_run4[:100]}...")
```

## 4. Code Structure

* Parameters are now grouped in a dictionary for easier management.
* A main function, `run_io_simulation_v2_1(params)`, encapsulates the simulation logic.
* A separate function, `plot_results(results)`, handles plotting and returns the figure as a base64-encoded PNG.
* The core simulation loop implements the logic from [[releases/archive/Information Ontology 1/0104_IO_Formalism_v2_Summary]], with the crucial change in **Phase 3b/6b Combined** implementing the `P_target` update via Mechanism 2 (Gradual Adaptation) from [[releases/archive/Information Ontology 1/0111_P_target_Dynamics_v2]].

## 5. Next Steps

This code is now ready to be executed with specific parameter sets in subsequent nodes (e.g., [[releases/archive/Information Ontology 1/0113_IO_Simulation_Run4]]) to test the effect of the revised `P_target` dynamics. The `run_io_simulation_v2_1` and `plot_results` functions will be called from those nodes with the desired parameters; a minimal usage sketch follows.
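For quick local testing outside the dedicated execution nodes, a minimal sketch, assuming the module above has been run or imported in the same session; the parameter values mirror the commented example, and the output filename is arbitrary and chosen here for illustration:

```python
import base64

params_run4 = {
    'N': 200, 'S_max': 500, 'h': 0.5, 'alpha': 0.1, 'gamma': 1.0,
    'p_M': 0.25, 'delta_theta_inc': 0.05, 'theta_max': 5.0,
    'theta_base': 0.0, 'delta_p_inc': 0.01, 'lambda_adapt': 0.01,
    'p_min': 1e-9, 'seed': 42
}
results = run_io_simulation_v2_1(params_run4)
plot_b64 = plot_results(results, title_suffix="(Run 4)")

# plot_results returns a base64-encoded PNG; decode it to a file to view it.
with open("io_sim_v2_1_run4.png", "wb") as f:
    f.write(base64.b64decode(plot_b64))
```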