import numpy as np
import matplotlib.pyplot as plt

from naginterfaces.library import opt


def prepare_data(filename="mackey_glass.txt", n_neurons=50):
    """Build echo-state-network reservoir states and one-step-ahead targets."""
    try:
        raw_data = np.loadtxt(filename)
    except OSError:
        print("Error: data file not found.")
        return None, None

    # Random reservoir, rescaled to a spectral radius of 0.9
    np.random.seed(42)
    W_res = np.random.normal(0, 1, (n_neurons, n_neurons))
    rho = np.max(np.abs(np.linalg.eigvals(W_res)))
    W_res *= 0.9 / rho
    W_in = np.random.normal(0, 1, (n_neurons, 1))

    # Drive the reservoir with the input signal and collect its states
    states = []
    x = np.zeros((n_neurons, 1))
    washout = 50
    for u in raw_data:
        u_in = np.array([[u]])
        x = np.tanh(np.dot(W_in, u_in) + np.dot(W_res, x))
        states.append(x.flatten())

    # Discard the washout period and pair each state with the next input value
    X_full = np.array(states)
    X_train = X_full[washout:-1]
    y_train = raw_data[washout + 1:]
    return X_train, y_train


def lsqfun(w, nres, inform, data):
    """Residuals of the linear readout: r = X.w - y."""
    X = data['X']
    y = data['y']
    pred = np.dot(X, w)
    rx = pred - y
    return rx, inform


def lsqgrd(w, nres, rdx, inform, data):
    """Gradient of the residuals; for a linear readout the Jacobian is simply X."""
    X = data['X']
    rdx[:] = X.flatten()
    return inform


def train_model(X, y_target, loss_type):
    n_samples, n_weights = X.shape

    handle = opt.handle_init(nvar=n_weights)
    opt.handle_set_nlnls(handle, nres=n_samples)

    # Switch the loss function (this is the heart of the experiment)
    opt.handle_opt_set(handle, f"NLDF Loss Function Type = {loss_type}")
    if loss_type == "SmoothL1":
        # Threshold for treating a residual as noise (deviations above 0.1 count as noise)
        opt.handle_opt_set(handle, "NLDF SmoothL1 Function Width = 0.1")

    # Regularization settings (the same coefficient 0.1 is used for both the L2 and SmoothL1 runs)
    opt.handle_opt_set(handle, "Reg Term Type = L1")
    opt.handle_opt_set(handle, "Reg Coefficient = 0.1")
    opt.handle_opt_set(handle, "Print Level = 0")

    user_data = {'X': X, 'y': y_target}
    w_init = np.zeros(n_weights)
    w_out = np.zeros(n_weights)

    try:
        results = opt.handle_solve_nldf(
            handle=handle,
            lsqfun=lsqfun,
            lsqgrd=lsqgrd,
            x=w_init,
            nres=n_samples,
            data=user_data
        )
        w_out = results.x
    except Exception as e:
        print(f"Error: {e}")

    opt.handle_free(handle)
    return w_out


def main():
    n_neurons = 50
    X, y_clean = prepare_data(n_neurons=n_neurons)
    if X is None:
        return

    y_noisy = y_clean.copy()

    # Generate burst noise:
    # between steps 120 and 180, shift the signal by +2.0 so that it "misbehaves"
    np.random.seed(999)
    noise_range = range(120, 180)
    for i in noise_range:
        y_noisy[i] += 2.0 + np.random.normal(0, 0.5)

    print("Training: L2 (Standard)...")
    # Train with the L2 loss under the same (fair) conditions
    w_l2 = train_model(X, y_noisy, "L2")

    print("Training: SmoothL1 (Robust)...")
    # Train with the SmoothL1 loss under the same (fair) conditions
    w_robust = train_model(X, y_noisy, "SmoothL1")

    print("Plotting...")
    pred_l2 = np.dot(X, w_l2)
    pred_robust = np.dot(X, w_robust)

    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 8), sharex=True)

    # Top panel: the problem (corrupted data)
    ax1.plot(y_noisy[:300], color='green', alpha=0.5, linewidth=1,
             label="Noisy Input (Sensor Data)")
    ax1.plot(y_clean[:300], 'k-', linewidth=2, label="True Signal (Ideal)")
    ax1.set_title("Problem: Input Signal Corrupted by Burst Noise")
    ax1.legend(loc='upper right')
    ax1.grid()
    ax1.set_ylabel("Signal Value")

    # Bottom panel: the solution (L2 vs SmoothL1)
    ax2.plot(y_clean[:300], 'k-', alpha=0.3, linewidth=5, label="True Signal")
    ax2.plot(pred_l2[:300], 'b-', linewidth=2, label="L2 (Standard)")
    ax2.plot(pred_robust[:300], 'r--', linewidth=3, label="SmoothL1 (Robust)")
    ax2.set_title("Solution: nAG Robust Regression Ignores Noise")
    ax2.legend(loc='upper right')
    ax2.grid()
    ax2.set_ylabel("Prediction")
    ax2.set_xlabel("Time Step")

    plt.tight_layout()
    plt.show()


if __name__ == "__main__":
    main()
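

# --------------------------------------------------------------------------
# Optional check, not part of the original experiment: a minimal sketch of
# how the visual comparison above could be quantified with a root-mean-square
# error against the clean target. The variables pred_l2, pred_robust and
# y_clean live inside main() above, so the example call below is shown only
# as a hypothetical usage comment; the helper itself needs nothing but NumPy.
def rmse(pred, target):
    """Root-mean-square error between a prediction and a reference signal."""
    pred = np.asarray(pred)
    target = np.asarray(target)
    return np.sqrt(np.mean((pred - target) ** 2))

# Hypothetical usage (inside main(), after the predictions are computed):
#     print("L2 RMSE vs clean signal:      ", rmse(pred_l2, y_clean))
#     print("SmoothL1 RMSE vs clean signal:", rmse(pred_robust, y_clean))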