
Research Seminar (Nakajima)

Specialized Seminar (Nakajima)

Handover Notes (Nakajima)

Memo (Nakajima)

Midterm Presentation (Nakajima)

Midterm Presentation System Summary (Nakajima)

Main Thesis (Nakajima)

B3 First-Semester Course Schedule

|Period|Courses|h
|1-2|Research Seminar|
|3-4|Graduation Thesis 1, Research Seminar|
|5-6|Graduation Thesis 1, Digital Signal Processing, Graduation Thesis 1, Graduation Thesis 1|
|7-8|Engineering Ethics|
|9-10|Research Seminar|
|11-12||


Memo


 # Tail of the CVXPY/OSQP-based first-stage solver method; the beginning of the
 # method (problem construction, beta_variables) was lost when the page was saved.
         problem.solve(solver="OSQP", max_iter=self.max_iter)

         if problem.status != "optimal":
             raise ConvergenceWarning(
                 f"Solver did not reach optimum (Status: {problem.status})"
             )

         # Stack the solver's coefficient variables into one vector
         beta_sol = np.concatenate([b.value for b in beta_variables], axis=0)
         beta_sol[np.abs(beta_sol) < self.tol] = 0  # truncate tiny values to exact zeros

         intercept, coef = beta_sol[0], beta_sol[1:]
         coef = np.maximum(coef, 0) if self.positive else coef

         return coef, intercept


 import numpy as np
 import pandas as pd
 from sklearn.linear_model import ElasticNetCV
 from sklearn.model_selection import train_test_split
 from sklearn.preprocessing import StandardScaler

 # Path to the CSV file
 file_path = "C:/Users/nt011/Desktop/研究/富山取引のほう富山市のみ/変数選択/変数作成後新新.csv"

 # Load the CSV (change the encoding from 'cp932' if the file uses a different one)
 df = pd.read_csv(file_path, encoding="cp932")

 # Separate features and target (transaction price per square meter)
 X = df.drop(columns=["取引価格(㎡単価)"])
 y = df["取引価格(㎡単価)"]

 # Standardize both X and y
 scaler_X = StandardScaler()
 scaler_y = StandardScaler()

 X_scaled = scaler_X.fit_transform(X)
 y_scaled = scaler_y.fit_transform(y.values.reshape(-1, 1)).ravel()

 # Train/test split
 X_train, X_test, y_train, y_test = train_test_split(
     X_scaled, y_scaled, test_size=0.35, random_state=42
 )

 # Use a subset of the training data
 X_sample, _, y_sample, _ = train_test_split(
     X_train, y_train, test_size=0.8, random_state=42
 )

 # Stage 1: derive optimal hyperparameters with ElasticNetCV
 enet_cv = ElasticNetCV(
     l1_ratio=np.linspace(0.0001, 1, 25),  # candidate l1_ratio values
     alphas=np.logspace(-5, 0, 25),        # candidate alpha values
     cv=5,                                 # number of cross-validation folds
     random_state=42,
     n_jobs=-1,
 )
 enet_cv.fit(X_train, y_train)

 # Stage-1 optimal hyperparameters and coefficients
 alpha1_opt = enet_cv.alpha_
 l1_ratio1_opt = enet_cv.l1_ratio_
 enet_coef = enet_cv.coef_

 print(f"Stage-1 optimal parameters: alpha1={alpha1_opt}, l1_ratio1={l1_ratio1_opt}")
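
The second stage consumes per-feature adaptive weights derived from the stage-1 coefficients (enet_coef above), but the weight calculation itself did not survive on this page. The sketch below assumes the standard adaptive elastic net weighting w_j = (|beta_j| + eps)^(-gamma); the helper name compute_adaptive_weights and the eps guard are illustrative assumptions, not code from the original.

 import numpy as np

 def compute_adaptive_weights(coef, gamma=0.5, eps=1e-8):
     # Standard adaptive elastic net weighting (assumed, not from the original page):
     # features that stage 1 shrank toward zero receive large weights, i.e. a
     # heavier L1 penalty in stage 2; eps guards against division by zero.
     return (np.abs(coef) + eps) ** (-gamma)

 # Example with the stage-1 results above:
 # weights = compute_adaptive_weights(enet_coef, gamma=0.5)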



To Do





 # Numba-accelerated FISTA solver for the second (adaptively weighted) stage of
 # the AdaptiveElasticNet. The function header and the start of the docstring
 # were lost when the page was saved; the signature below is reconstructed from
 # the call in _optimize_second_stage().
 import numpy as np
 from numba import njit


 @njit(cache=True)
 def fista(X, y, weights, alpha1, l1_ratio1, alpha2, l1_ratio2, max_iter=5000, tol=1e-6):
     """
     Parameters
     ----------
     alpha1 : float
         Stage-1 ElasticNet regularization strength
     l1_ratio1 : float
         Stage-1 L1 ratio
     alpha2 : float
         Stage-2 ElasticNet regularization strength
     l1_ratio2 : float
         Stage-2 L1 ratio
     max_iter : int, optional
         Maximum number of iterations (default=5000)
     tol : float, optional
         Convergence tolerance (default=1e-6)

     Returns
     -------
     beta : ndarray
         Estimated coefficients (n_features,)
     """
     n_samples, n_features = X.shape
     beta = np.zeros(n_features)  # initialize coefficients
     beta_old = beta.copy()
     t = 1.0

     # Step size from a Frobenius-norm upper bound on the Lipschitz constant
     L = np.sum(X ** 2) / n_samples

     # L2 regularization strength (reuses the stage-1 parameters)
     L2_penalty = alpha1 * (1 - l1_ratio1)

     # FISTA iterations are sequential (each depends on the previous one),
     # so an ordinary loop is used rather than numba's parallel prange
     for _ in range(max_iter):
         # Gradient of the smooth part: squared loss plus the L2 penalty
         grad = -X.T @ (y - X @ beta) / n_samples + L2_penalty * beta

         # Soft-thresholding step (weighted L1 regularization)
         z = beta - grad / L
         shrink = alpha2 * l1_ratio2 * weights / L
         beta_new = np.sign(z) * np.maximum(np.abs(z) - shrink, 0)

         # Nesterov acceleration
         t_new = (1 + np.sqrt(1 + 4 * t ** 2)) / 2
         beta = beta_new + ((t - 1) / t_new) * (beta_new - beta_old)

         # Convergence check
         if np.linalg.norm(beta - beta_old) < tol:
             break

         beta_old = beta.copy()
         t = t_new

     return beta


 # Method of the AdaptiveElasticNet class (the rest of the class is not on this
 # page); fista is assumed to be attached to the class as a staticmethod.
 def _optimize_second_stage(self, X, y, weights):
     """
     Perform second-stage optimization with adaptive weights using
     Numba-accelerated FISTA.
     """
     coef = self.fista(
         X, y, weights, self.alpha1, self.l1_ratio1, self.alpha2, self.l1_ratio2,
         max_iter=self.max_iter, tol=self.tol,
     )
     intercept = np.mean(y - X @ coef) if self.fit_intercept else 0
     return coef, intercept


 # Updated usage with numba-optimized weight calculation
 import pandas as pd
 from sklearn.model_selection import GridSearchCV, train_test_split
 from sklearn.preprocessing import StandardScaler

 # CSV file path
 file_path = "C:/Users/nt011/Desktop/研究/富山取引のほう富山市のみ/変数選択/変数作成後新新.csv"

 # Load CSV data
 df = pd.read_csv(file_path, encoding="cp932")

 # Split features and target
 X = df.drop(columns=["取引価格(㎡単価)"])
 y = df["取引価格(㎡単価)"]

 # Standardize data
 scaler_X = StandardScaler()
 scaler_y = StandardScaler()

 X_scaled = scaler_X.fit_transform(X)
 y_scaled = scaler_y.fit_transform(y.values.reshape(-1, 1)).ravel()

 # Split data
 X_train, X_test, y_train, y_test = train_test_split(
     X_scaled, y_scaled, test_size=0.35, random_state=42
 )

 # Use a subset of the training data
 X_sample, _, y_sample, _ = train_test_split(
     X_train, y_train, test_size=0.8, random_state=42
 )

 # First-stage ElasticNet parameters (from the stage-1 search above)
 model = AdaptiveElasticNet(alpha1=0.00825404185268019, l1_ratio1=0.8750125, gamma=0.5)

 # Second-stage parameter grid
 param_grid = {
     "alpha2": np.logspace(-4, 0, 5),
     "l1_ratio2": np.linspace(0.0001, 1.0, 5),
 }

 # GridSearchCV with accelerated weight calculation
 grid_search = GridSearchCV(
     estimator=model,
     param_grid=param_grid,
     cv=3,
     scoring="neg_mean_squared_error",
     n_jobs=-1,
     verbose=1,
 )

 # Fit GridSearchCV
 grid_search.fit(X_sample, y_sample)

 # Output best parameters and score
 print(f"Best parameters: {grid_search.best_params_}")
 print(f"Best negative MSE: {grid_search.best_score_}")
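
Neither block above scores the tuned model on the held-out test split or maps predictions back to the original price scale. A minimal evaluation sketch, assuming the fitted AdaptiveElasticNet exposes a scikit-learn-style predict() through grid_search.best_estimator_ (the class definition is not on this page):

 from sklearn.metrics import mean_squared_error, r2_score

 best_model = grid_search.best_estimator_
 y_pred_scaled = best_model.predict(X_test)

 # Undo the target standardization so the errors are reported in the original
 # price-per-square-meter units rather than standardized units
 y_pred = scaler_y.inverse_transform(y_pred_scaled.reshape(-1, 1)).ravel()
 y_true = scaler_y.inverse_transform(y_test.reshape(-1, 1)).ravel()

 print(f"Test RMSE: {mean_squared_error(y_true, y_pred) ** 0.5:.3f}")
 print(f"Test R^2:  {r2_score(y_true, y_pred):.3f}")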
