Practical Case Studies #
Overview #
This chapter walks through complete machine learning projects, covering the full workflow from data processing to model deployment.
Project Workflow #
text
Data Acquisition → Data Exploration → Data Preprocessing → Feature Engineering → Model Training → Model Evaluation → Model Deployment
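As an orientation before the full cases, the sketch below runs the entire flow in miniature on scikit-learn's built-in Iris dataset (an illustrative toy example, not part of the cases that follow):
python
# Miniature end-to-end run: every workflow stage in a few lines
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from joblib import dump

X, y = load_iris(return_X_y=True)                  # data acquisition
print(X.shape, y.shape)                            # (very) quick exploration
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=42)
pipe = Pipeline([                                  # preprocessing + model
    ('scaler', StandardScaler()),
    ('clf', LogisticRegression(max_iter=200))
])
pipe.fit(X_tr, y_tr)                               # training
print(accuracy_score(y_te, pipe.predict(X_te)))    # evaluation
dump(pipe, 'iris_model.joblib')                    # persist for deployment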
Case 1: Titanic Survival Prediction #
Background #
Predict whether a Titanic passenger survived, a classic binary classification problem. For reproducibility, the script below generates a synthetic dataset whose feature distributions roughly mimic the real Titanic data, so no download is needed.
Complete Code #
python
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix
from joblib import dump
import matplotlib.pyplot as plt
import seaborn as sns
# Generate a synthetic Titanic-like dataset for reproducibility
np.random.seed(42)
n_samples = 891
data = pd.DataFrame({
    'Pclass': np.random.choice([1, 2, 3], n_samples, p=[0.24, 0.21, 0.55]),
    'Sex': np.random.choice(['male', 'female'], n_samples),
    'Age': np.random.normal(30, 14, n_samples).clip(0, 80),
    'SibSp': np.random.choice(range(9), n_samples, p=[0.68, 0.23, 0.05, 0.02, 0.01, 0.005, 0.002, 0.002, 0.001]),
    'Parch': np.random.choice(range(7), n_samples, p=[0.76, 0.13, 0.08, 0.02, 0.005, 0.003, 0.002]),
    'Fare': np.abs(np.random.normal(32, 50, n_samples)),
    'Embarked': np.random.choice(['S', 'C', 'Q'], n_samples, p=[0.72, 0.19, 0.09])
})
# Synthetic labels: survival depends on sex, class, and age, plus noise
survival_prob = (
    (data['Sex'] == 'female').astype(float) * 0.3 +
    (data['Pclass'] == 1).astype(float) * 0.2 +
    (data['Age'] < 18).astype(float) * 0.1 +
    np.random.normal(0.3, 0.1, n_samples)
)
data['Survived'] = (survival_prob > 0.5).astype(int)
print("数据概览:")
print(data.head())
print("\n数据信息:")
print(data.info())
print("\n描述统计:")
print(data.describe())
print("\n缺失值统计:")
print(data.isnull().sum())
# Exploratory data analysis: four quick views of the data
fig, axes = plt.subplots(2, 2, figsize=(12, 10))
sns.countplot(data=data, x='Survived', ax=axes[0, 0])
axes[0, 0].set_title('Survival Distribution')
sns.countplot(data=data, x='Sex', hue='Survived', ax=axes[0, 1])
axes[0, 1].set_title('Survival by Sex')
sns.histplot(data=data, x='Age', hue='Survived', bins=20, ax=axes[1, 0])
axes[1, 0].set_title('Age Distribution by Survival')
sns.barplot(data=data, x='Pclass', y='Survived', ax=axes[1, 1])
axes[1, 1].set_title('Survival Rate by Class')
plt.tight_layout()
plt.savefig('titanic_eda.png')
plt.close()
# Preprocessing: impute + scale numerics, impute + one-hot encode categoricals
numeric_features = ['Age', 'Fare', 'SibSp', 'Parch']
categorical_features = ['Pclass', 'Sex', 'Embarked']
numeric_transformer = Pipeline([
    ('imputer', SimpleImputer(strategy='median')),
    ('scaler', StandardScaler())
])
categorical_transformer = Pipeline([
    ('imputer', SimpleImputer(strategy='most_frequent')),
    ('onehot', OneHotEncoder(handle_unknown='ignore'))
])
preprocessor = ColumnTransformer([
    ('num', numeric_transformer, numeric_features),
    ('cat', categorical_transformer, categorical_features)
])
pipe = Pipeline([
    ('preprocessor', preprocessor),
    ('classifier', RandomForestClassifier(random_state=42))
])
X = data.drop('Survived', axis=1)
y = data['Survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
param_grid = {
    'classifier__n_estimators': [50, 100, 200],
    'classifier__max_depth': [5, 10, None],
    'classifier__min_samples_split': [2, 5, 10]
}
grid_search = GridSearchCV(
    pipe,
    param_grid,
    cv=5,
    scoring='accuracy',
    n_jobs=-1
)
grid_search.fit(X_train, y_train)
print(f"\n最佳参数: {grid_search.best_params_}")
print(f"最佳交叉验证分数: {grid_search.best_score_:.4f}")
best_model = grid_search.best_estimator_
y_pred = best_model.predict(X_test)
print("\n分类报告:")
print(classification_report(y_test, y_pred))
cm = confusion_matrix(y_test, y_pred)
plt.figure(figsize=(8, 6))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.title('Confusion Matrix')
plt.savefig('confusion_matrix.png')
plt.close()
# Recover the feature names after one-hot encoding to pair with importances
feature_importance = best_model.named_steps['classifier'].feature_importances_
feature_names = (
    numeric_features +
    list(best_model.named_steps['preprocessor'].named_transformers_['cat'].named_steps['onehot'].get_feature_names_out(categorical_features))
)
importance_df = pd.DataFrame({
    'feature': feature_names,
    'importance': feature_importance
}).sort_values('importance', ascending=False)
plt.figure(figsize=(10, 6))
sns.barplot(data=importance_df.head(10), x='importance', y='feature')
plt.title('Top 10 Feature Importance')
plt.savefig('feature_importance.png')
plt.close()
dump(best_model, 'titanic_model.joblib')
print("\n模型已保存为 titanic_model.joblib")
def predict_survival(model, passenger_info):
    """Predict survival for a single passenger given as a dict."""
    df = pd.DataFrame([passenger_info])
    prediction = model.predict(df)[0]
    probability = model.predict_proba(df)[0]
    return {
        'survived': bool(prediction),
        'probability': {
            'not_survived': probability[0],
            'survived': probability[1]
        }
    }

sample_passenger = {
    'Pclass': 1,
    'Sex': 'female',
    'Age': 25,
    'SibSp': 0,
    'Parch': 0,
    'Fare': 50,
    'Embarked': 'S'
}
result = predict_survival(best_model, sample_passenger)
print("\nPrediction example:")
print(f"Passenger info: {sample_passenger}")
print(f"Prediction: {result}")
Case 2: House Price Prediction #
Background #
Predict median house prices using the California Housing dataset, a regression problem.
Complete Code #
python
import pandas as pd
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from joblib import dump
import matplotlib.pyplot as plt
housing = fetch_california_housing()
X = pd.DataFrame(housing.data, columns=housing.feature_names)
y = housing.target
print("数据集信息:")
print(f"样本数: {X.shape[0]}")
print(f"特征数: {X.shape[1]}")
print(f"特征名: {housing.feature_names}")
print("\n数据概览:")
print(X.describe())
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
pipe = Pipeline([
    ('scaler', StandardScaler()),
    ('model', GradientBoostingRegressor(
        n_estimators=200,
        learning_rate=0.1,
        max_depth=5,
        random_state=42
    ))
])
pipe.fit(X_train, y_train)
# 5-fold CV on the full dataset; each fold re-fits a fresh clone of the pipeline
cv_scores = cross_val_score(pipe, X, y, cv=5, scoring='r2')
print(f"\nCross-validation R² score: {cv_scores.mean():.4f} (+/- {cv_scores.std():.4f})")
y_pred = pipe.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print(f"\n测试集评估:")
print(f"MSE: {mse:.4f}")
print(f"RMSE: {rmse:.4f}")
print(f"MAE: {mae:.4f}")
print(f"R²: {r2:.4f}")
plt.figure(figsize=(10, 6))
plt.scatter(y_test, y_pred, alpha=0.5)
plt.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'r--')
plt.xlabel('Actual Price')
plt.ylabel('Predicted Price')
plt.title('Actual vs Predicted House Prices')
plt.savefig('house_price_prediction.png')
plt.close()
residuals = y_test - y_pred
plt.figure(figsize=(10, 6))
plt.hist(residuals, bins=50, edgecolor='black')
plt.xlabel('Residual')
plt.ylabel('Count')
plt.title('Residual Distribution')
plt.savefig('residuals.png')
plt.close()
# Feature importances from the fitted gradient boosting model
model = pipe.named_steps['model']
importance = pd.DataFrame({
    'feature': housing.feature_names,
    'importance': model.feature_importances_
}).sort_values('importance', ascending=False)
print("\nFeature importances:")
print(importance)
dump(pipe, 'house_price_model.joblib')
print("\n模型已保存为 house_price_model.joblib")
def predict_price(model, house_features):
    """Predict the median house value (in units of $100,000) for one house."""
    df = pd.DataFrame([house_features])
    price = model.predict(df)[0]
    return price

sample_house = {
    'MedInc': 5.0,
    'HouseAge': 20,
    'AveRooms': 6,
    'AveBedrms': 1,
    'Population': 1000,
    'AveOccup': 3,
    'Latitude': 34,
    'Longitude': -118
}
predicted_price = predict_price(pipe, sample_house)
print("\nPrediction example:")
print(f"House features: {sample_house}")
print(f"Predicted price: {predicted_price:.4f} (x $100,000)")
Case 3: Text Classification #
Background #
Classify newsgroup posts by topic, a multi-class problem, using four categories from the 20 Newsgroups dataset.
Complete Code #
python
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix
from joblib import dump
import matplotlib.pyplot as plt
import seaborn as sns
categories = ['alt.atheism', 'soc.religion.christian', 'comp.graphics', 'sci.med']
print("Loading data...")
# Note: fetch_20newsgroups sorts category names alphabetically, so label
# indices follow train_data.target_names, not the order of `categories`.
train_data = fetch_20newsgroups(subset='train', categories=categories)
test_data = fetch_20newsgroups(subset='test', categories=categories)
X_train, y_train = train_data.data, train_data.target
X_test, y_test = test_data.data, test_data.target
print(f"Training set size: {len(X_train)}")
print(f"Test set size: {len(X_test)}")
print(f"Categories: {train_data.target_names}")
pipe = Pipeline([
    ('tfidf', TfidfVectorizer(stop_words='english', max_features=10000)),
    ('clf', LogisticRegression(max_iter=1000, random_state=42))
])
param_grid = {
    'tfidf__max_features': [5000, 10000],
    'tfidf__ngram_range': [(1, 1), (1, 2)],
    'clf__C': [0.1, 1, 10]
}
print("\n开始网格搜索...")
grid_search = GridSearchCV(
pipe,
param_grid,
cv=5,
scoring='accuracy',
n_jobs=-1
)
grid_search.fit(X_train, y_train)
print(f"\n最佳参数: {grid_search.best_params_}")
print(f"最佳交叉验证分数: {grid_search.best_score_:.4f}")
best_model = grid_search.best_estimator_
y_pred = best_model.predict(X_test)
print("\nClassification report:")
# Use train_data.target_names: label indices follow the alphabetically
# sorted category names, not the order of the `categories` list
print(classification_report(y_test, y_pred, target_names=train_data.target_names))
cm = confusion_matrix(y_test, y_pred)
plt.figure(figsize=(10, 8))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
            xticklabels=train_data.target_names, yticklabels=train_data.target_names)
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.title('Confusion Matrix - Text Classification')
plt.xticks(rotation=45)
plt.tight_layout()
plt.savefig('text_classification_cm.png')
plt.close()
dump(best_model, 'text_classifier.joblib')
print("\n模型已保存为 text_classifier.joblib")
def classify_text(model, text):
    """Classify one document and return the category plus class probabilities."""
    prediction = model.predict([text])[0]
    probability = model.predict_proba([text])[0]
    # Index into the sorted target names, which match the label encoding
    names = train_data.target_names
    return {
        'category': names[prediction],
        'probabilities': {cat: prob for cat, prob in zip(names, probability)}
    }

sample_texts = [
    "The new graphics card has amazing performance",
    "Doctors recommend regular exercise for health",
    "The Bible teaches about faith and salvation"
]
print("\nPrediction examples:")
for text in sample_texts:
    result = classify_text(best_model, text)
    print(f"\nText: {text}")
    print(f"Predicted category: {result['category']}")
    print(f"Probabilities: {result['probabilities']}")
Best Practices Summary #
1. Project Structure #
text
project/
├── data/
│ ├── raw/
│ └── processed/
├── notebooks/
│ └── exploration.ipynb
├── src/
│ ├── data.py
│ ├── features.py
│ ├── models.py
│ └── utils.py
├── models/
├── tests/
├── requirements.txt
└── README.md
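For reference, a requirements.txt covering this chapter's cases might look like the following (the version floors are illustrative assumptions; pin whatever you actually tested against):
text
pandas>=2.0
numpy>=1.24
scikit-learn>=1.3
matplotlib>=3.7
seaborn>=0.12
joblib>=1.3
flask>=2.3  # only needed for the deployment sketch in Case 1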
2. Coding Standards #
- Use meaningful variable names
- Add comments where they aid understanding
- Follow PEP 8
- Write unit tests (see the sketch below)
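A minimal unit test for the Case 1 prediction helper could look like the sketch below (the `titanic` module name is hypothetical; in practice you would factor predict_survival into src/models.py and import from there):
python
# test_predict.py: run with `pytest`. The `titanic` module is a
# hypothetical name for the Case 1 script factored into a module.
import pytest

def test_predict_survival_output():
    titanic = pytest.importorskip('titanic')
    result = titanic.predict_survival(titanic.best_model, {
        'Pclass': 3, 'Sex': 'male', 'Age': 22,
        'SibSp': 0, 'Parch': 0, 'Fare': 7.25, 'Embarked': 'S'
    })
    assert set(result) == {'survived', 'probability'}
    assert 0.0 <= result['probability']['survived'] <= 1.0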
3. Version Control #
- Manage code with Git
- Track model versions (see the sketch below)
- Keep data-processing scripts under version control
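For tracking model versions, one lightweight approach (a sketch; the metadata fields are illustrative choices) is to persist the model together with its provenance, reusing best_model and grid_search from Case 1:
python
# Save the fitted model bundled with version metadata for traceability
import datetime
import sklearn
from joblib import dump

artifact = {
    'model': best_model,                          # fitted pipeline from Case 1
    'trained_at': datetime.datetime.now().isoformat(),
    'sklearn_version': sklearn.__version__,
    'cv_accuracy': grid_search.best_score_,
}
dump(artifact, 'titanic_model_v1.joblib')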
4. Documentation #
- Record data-processing steps
- Explain why each model was chosen
- Log experiment results (a minimal logger is sketched below)
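For logging experiment results, even a plain CSV beats memory; a minimal sketch (the column set is an illustrative choice), again reusing grid_search from Case 1:
python
# Append one row per experiment so runs stay comparable over time
import csv
import datetime
from pathlib import Path

def log_experiment(path, params, score):
    file = Path(path)
    is_new = not file.exists()
    with file.open('a', newline='') as f:
        writer = csv.writer(f)
        if is_new:
            writer.writerow(['timestamp', 'params', 'score'])
        writer.writerow([datetime.datetime.now().isoformat(), params, score])

log_experiment('experiments.csv', grid_search.best_params_, grid_search.best_score_)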
Summary #
Through these case studies you have practiced:
- The complete machine learning project workflow
- Data exploration and visualization
- Feature engineering and model training
- Model evaluation and tuning
- Model saving and deployment
Keep practicing to sharpen your machine learning skills!
Last updated: 2026-04-04