# Linear Models

## Overview

Linear models are among the most fundamental and widely used model families in machine learning. They assume a linear relationship between the target variable and the features.

## Types of Linear Models

| Type | Task | Representative models |
|---|---|---|
| Regression | Predict continuous values | Linear regression, Ridge, Lasso |
| Classification | Predict discrete classes | Logistic regression, linear SVM |

## Linear Regression

### Basic Principle

Linear regression looks for the weights w and bias b that make the predictions y = Xw + b as close as possible to the true values.

```python
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score
import numpy as np

# Tiny dataset with an exact linear relationship: y = 2x
X = np.array([[1], [2], [3], [4], [5]])
y = np.array([2, 4, 6, 8, 10])

model = LinearRegression()
model.fit(X, y)

print(f"Weights: {model.coef_}")
print(f"Intercept: {model.intercept_}")
print(f"Prediction: {model.predict([[6]])}")
```

### Multiple Linear Regression

```python
from sklearn.datasets import make_regression

# Synthetic data: 100 samples, 3 features
X, y = make_regression(n_samples=100, n_features=3, noise=10, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)

print(f"MSE: {mean_squared_error(y_test, y_pred):.4f}")
print(f"R²: {r2_score(y_test, y_pred):.4f}")
```

### Normal Equation

Ordinary least squares has a closed-form solution given by the normal equation:

```text
w = (X^T X)^(-1) X^T y
```

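As an illustration, the normal equation can be evaluated directly with NumPy and compared against `LinearRegression` (a minimal sketch on made-up data; note that scikit-learn itself uses a more numerically stable least-squares solver rather than this explicit matrix inverse):

```python
import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))
y = X @ np.array([1.5, -2.0, 0.5]) + 3.0 + rng.normal(scale=0.1, size=100)

# Append a column of ones so the bias is estimated along with the weights
X_b = np.hstack([X, np.ones((len(X), 1))])
w = np.linalg.inv(X_b.T @ X_b) @ X_b.T @ y

model = LinearRegression().fit(X, y)
print(w[:-1], w[-1])                   # weights and bias from the normal equation
print(model.coef_, model.intercept_)   # should be essentially identical
```
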
## Ridge Regression

### L2 Regularization

Ridge regression adds an L2 penalty to the loss function, which shrinks the coefficients and helps prevent overfitting.

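Concretely, Ridge minimizes the following objective, where α corresponds to the `alpha` parameter used below:

```text
||y - Xw||² + α ||w||²
```
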
```python
from sklearn.linear_model import Ridge

# More features this time, so regularization matters more
X, y = make_regression(n_samples=100, n_features=20, noise=10, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

ridge = Ridge(alpha=1.0)
ridge.fit(X_train, y_train)

print(f"Train score: {ridge.score(X_train, y_train):.4f}")
print(f"Test score: {ridge.score(X_test, y_test):.4f}")
```

### Regularization Strength

```python
import matplotlib.pyplot as plt

alphas = [0.001, 0.01, 0.1, 1, 10, 100]
train_scores = []
test_scores = []

for alpha in alphas:
    ridge = Ridge(alpha=alpha)
    ridge.fit(X_train, y_train)
    train_scores.append(ridge.score(X_train, y_train))
    test_scores.append(ridge.score(X_test, y_test))

plt.semilogx(alphas, train_scores, label='Train')
plt.semilogx(alphas, test_scores, label='Test')
plt.xlabel('Alpha')
plt.ylabel('R² Score')
plt.legend()
plt.show()
```

## Lasso Regression

### L1 Regularization

Lasso adds an L1 penalty, which can drive coefficients exactly to zero, producing sparse solutions and performing feature selection.

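The objective Lasso minimizes (as documented by scikit-learn) is:

```text
(1 / (2 * n_samples)) * ||y - Xw||² + α ||w||₁
```

Because the L1 term is not differentiable at zero, the optimum tends to place many coefficients exactly at zero, which is what makes the solutions below sparse.
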
```python
from sklearn.linear_model import Lasso

# 20 features, but only 5 carry any signal
X, y = make_regression(n_samples=100, n_features=20, n_informative=5, noise=10, random_state=42)

lasso = Lasso(alpha=0.1)
lasso.fit(X, y)

print(f"Number of non-zero coefficients: {np.sum(lasso.coef_ != 0)}")
print(f"Coefficients: {lasso.coef_}")
```

### Feature Selection Effect

```python
# Split the dataset defined above before fitting
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

lasso = Lasso(alpha=0.5)
lasso.fit(X_train, y_train)

selected_features = np.where(lasso.coef_ != 0)[0]
print(f"Selected feature indices: {selected_features}")
```

## ElasticNet

### L1 + L2 Regularization

ElasticNet combines the strengths of L1 and L2 regularization.

```python
from sklearn.linear_model import ElasticNet

enet = ElasticNet(alpha=1.0, l1_ratio=0.5)
enet.fit(X_train, y_train)

print(f"Train score: {enet.score(X_train, y_train):.4f}")
print(f"Test score: {enet.score(X_test, y_test):.4f}")
```

### Parameters

| Parameter | Description |
|---|---|
| `alpha` | Overall regularization strength |
| `l1_ratio` | Fraction of the penalty that is L1 (0-1) |

```python
# How alpha is split between the two penalty terms (illustration only)
alpha = 1.0
l1_ratio = 0.5
l1_penalty = l1_ratio * alpha        # weight of the L1 term
l2_penalty = (1 - l1_ratio) * alpha  # weight of the L2 term
```

## Logistic Regression

### Binary Classification

Logistic regression handles binary classification by passing a linear score through the sigmoid function, mapping the output to [0, 1] so it can be read as a probability.

```python
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_classification
from sklearn.metrics import accuracy_score, classification_report

X, y = make_classification(n_samples=1000, n_features=10, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

log_reg = LogisticRegression(max_iter=1000)
log_reg.fit(X_train, y_train)

y_pred = log_reg.predict(X_test)
y_proba = log_reg.predict_proba(X_test)

print(f"Accuracy: {accuracy_score(y_test, y_pred):.4f}")
print(f"Predicted probabilities: {y_proba[:5]}")
```

### Multiclass Classification

```python
from sklearn.datasets import load_iris

iris = load_iris()
X, y = iris.data, iris.target

# 'multinomial' fits a single softmax model over all classes
log_reg = LogisticRegression(multi_class='multinomial', max_iter=1000)
log_reg.fit(X, y)

print(f"Classes: {log_reg.classes_}")
print(f"Coefficient shape: {log_reg.coef_.shape}")
```

### Regularization

```python
# The solver must support the chosen penalty (see the table below)
log_reg_l1 = LogisticRegression(penalty='l1', solver='saga', max_iter=1000)
log_reg_l2 = LogisticRegression(penalty='l2', solver='lbfgs', max_iter=1000)
log_reg_enet = LogisticRegression(penalty='elasticnet', solver='saga', l1_ratio=0.5, max_iter=1000)
```

### Choosing a Solver

| Solver | Supported penalties | Typical use |
|---|---|---|
| `lbfgs` | L2, none | Small datasets |
| `liblinear` | L1, L2 | Small datasets |
| `saga` | L1, L2, ElasticNet | Large datasets |
| `sag` | L2, none | Large datasets |
| `newton-cg` | L2, none | Multiclass problems |

## SGD Regression / Classification

### Stochastic Gradient Descent

SGDRegressor and SGDClassifier fit linear models with stochastic gradient descent, which scales well to large datasets.

```python
from sklearn.linear_model import SGDRegressor, SGDClassifier

# Assumes X_train / y_train from an earlier split
sgd_reg = SGDRegressor(max_iter=1000, tol=1e-3, random_state=42)
sgd_reg.fit(X_train, y_train)

sgd_clf = SGDClassifier(max_iter=1000, tol=1e-3, random_state=42)
sgd_clf.fit(X_train, y_train)
```

### Incremental Learning

```python
# partial_fit updates the model one mini-batch at a time
sgd_reg = SGDRegressor()
batch_size = 32

for epoch in range(10):
    for start in range(0, len(X_train), batch_size):
        X_batch = X_train[start:start + batch_size]
        y_batch = y_train[start:start + batch_size]
        sgd_reg.partial_fit(X_batch, y_batch)
```

## Polynomial Regression

### Using a Pipeline

```python
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline

poly_reg = Pipeline([
    ('poly', PolynomialFeatures(degree=2)),
    ('linear', LinearRegression())
])

# y = x², so a degree-2 model fits exactly
X = np.array([[1], [2], [3], [4], [5]])
y = np.array([1, 4, 9, 16, 25])

poly_reg.fit(X, y)
print(poly_reg.predict([[6]]))  # ≈ 36
```

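What the `poly` step actually does is expand each input column into polynomial terms. A quick look at the transformed features for the data above:

```python
poly = PolynomialFeatures(degree=2)
print(poly.fit_transform(X))          # each row becomes [1, x, x²]
print(poly.get_feature_names_out())   # ['1', 'x0', 'x0^2']
```
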
### Comparing Fits

```python
degrees = [1, 2, 3, 5, 10]

for degree in degrees:
    poly = Pipeline([
        ('poly', PolynomialFeatures(degree=degree)),
        ('linear', LinearRegression())
    ])
    poly.fit(X_train, y_train)
    print(f"Degree {degree}: R² = {poly.score(X_test, y_test):.4f}")
```

## Model Evaluation

### Regression Metrics

```python
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score

y_pred = model.predict(X_test)

mse = mean_squared_error(y_test, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)

print(f"MSE: {mse:.4f}")
print(f"RMSE: {rmse:.4f}")
print(f"MAE: {mae:.4f}")
print(f"R²: {r2:.4f}")
```

### Classification Metrics

```python
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

y_pred = log_reg.predict(X_test)

print(f"Accuracy: {accuracy_score(y_test, y_pred):.4f}")
print(f"Precision: {precision_score(y_test, y_pred):.4f}")
print(f"Recall: {recall_score(y_test, y_pred):.4f}")
print(f"F1 score: {f1_score(y_test, y_pred):.4f}")
```

## Comparing Regularization

### Coefficient Comparison

```python
import matplotlib.pyplot as plt

linear = LinearRegression()
ridge = Ridge(alpha=10)
lasso = Lasso(alpha=0.1)

linear.fit(X_train, y_train)
ridge.fit(X_train, y_train)
lasso.fit(X_train, y_train)

plt.figure(figsize=(10, 5))
plt.plot(linear.coef_, 'o-', label='Linear')
plt.plot(ridge.coef_, 's-', label='Ridge')
plt.plot(lasso.coef_, '^-', label='Lasso')
plt.legend()
plt.xlabel('Feature Index')
plt.ylabel('Coefficient')
plt.show()
```

### Summary Table

| Model | Regularization | Characteristics |
|---|---|---|
| LinearRegression | None | Simple; prone to overfitting |
| Ridge | L2 | Shrinks coefficients; not sparse |
| Lasso | L1 | Sparse coefficients; feature selection |
| ElasticNet | L1 + L2 | Combines the strengths of both |

## Hyperparameter Tuning

### Cross-Validation

```python
from sklearn.model_selection import cross_val_score

ridge = Ridge(alpha=1.0)
scores = cross_val_score(ridge, X, y, cv=5, scoring='r2')

print(f"Cross-validation scores: {scores}")
print(f"Mean score: {scores.mean():.4f}")
```

### GridSearchCV

```python
from sklearn.model_selection import GridSearchCV

param_grid = {'alpha': [0.001, 0.01, 0.1, 1, 10, 100]}
ridge_cv = GridSearchCV(Ridge(), param_grid, cv=5)
ridge_cv.fit(X_train, y_train)

print(f"Best parameters: {ridge_cv.best_params_}")
print(f"Best score: {ridge_cv.best_score_:.4f}")
```

## Worked Examples

### House Price Prediction

```python
from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline

housing = fetch_california_housing()
X, y = housing.data, housing.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

model = Pipeline([
    ('scaler', StandardScaler()),
    ('ridge', Ridge(alpha=1.0))
])
model.fit(X_train, y_train)

print(f"Test R²: {model.score(X_test, y_test):.4f}")
```

### Cancer Classification

```python
from sklearn.datasets import load_breast_cancer

cancer = load_breast_cancer()
X, y = cancer.data, cancer.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

model = Pipeline([
    ('scaler', StandardScaler()),
    ('log_reg', LogisticRegression(max_iter=1000))
])
model.fit(X_train, y_train)

print(f"Test accuracy: {model.score(X_test, y_test):.4f}")
```

## Best Practices

### 1. Standardize the Data

Regularized linear models penalize all coefficients on the same scale, so standardize the features first:

```python
pipe = Pipeline([
    ('scaler', StandardScaler()),
    ('model', Ridge())
])
```

### 2. Choose the Regularization

```python
# Rough heuristic: with more features than samples, sparsity (Lasso) helps
if n_features > n_samples:
    model = Lasso()
else:
    model = Ridge()
```

### 3. Handle Collinearity

```python
# A stronger L2 penalty stabilizes coefficients when features are highly correlated
ridge = Ridge(alpha=10)
```

### 4. Feature Selection

```python
lasso = Lasso(alpha=0.1)
lasso.fit(X, y)
selected = np.where(lasso.coef_ != 0)[0]  # indices of the features Lasso kept
```

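To use this selection step inside a pipeline, scikit-learn's `SelectFromModel` wraps the same idea (a sketch, assuming `X` and `y` as above):

```python
from sklearn.feature_selection import SelectFromModel

selector = SelectFromModel(Lasso(alpha=0.1))
X_selected = selector.fit_transform(X, y)
print(X_selected.shape)  # keeps roughly the columns with non-zero Lasso coefficients
```
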
## Next Steps

Once you are comfortable with linear models, continue with decision trees to explore nonlinear models!

Last updated: 2026-04-04