
Complete code for the entire process

Published on 2024-06-04 13:49:08

Below is a complete example covering the entire workflow: reading the data, preprocessing, feature extraction, model training, hyperparameter tuning, and evaluating the results. Note that in practice you will need to adapt the code to your dataset (for example, the file path and the separator).
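For reference, the pd.read_csv call below expects a tab-separated file with a label column followed by a message column, in the style of the classic SMS spam dataset. The two rows here are purely illustrative placeholders, not real data:

ham	thanks, see you at noon
spam	you have won a free prize, reply now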


# Import the necessary libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from graphviz import Source
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import classification_report
from sklearn.feature_extraction.text import CountVectorizer

# Data import and preprocessing
df = pd.read_csv('path_to_your_txt_file.txt', sep='\t', names=['label', 'message'])

# Show the first five rows
print(df.head())

# Extract the labels and encode them as integers
le = LabelEncoder()
df['label'] = le.fit_transform(df['label'])

# Text feature extraction (bag-of-words counts)
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(df['message'])
y = df['label']

# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Perceptron classifier
perceptron = Perceptron()
perceptron_scores = cross_val_score(perceptron, X_train, y_train, cv=5)
print("Perceptron CV average score:", perceptron_scores.mean())

# K-nearest-neighbors classifier: search for the best k by cross-validation
k_values = range(1, 31)
knn_scores = []
for k in k_values:
    knn = KNeighborsClassifier(n_neighbors=k)
    score = cross_val_score(knn, X_train, y_train, cv=5).mean()
    knn_scores.append(score)

optimal_k = k_values[np.argmax(knn_scores)]
print(f"Optimal k value for KNN: {optimal_k}")

# Plot KNN accuracy as a function of k
plt.figure(figsize=(10, 6))
plt.plot(k_values, knn_scores, marker='o', linestyle='-')
plt.title('KNN Accuracy vs. Number of Neighbors k')
plt.xlabel('Number of Neighbors k')
plt.ylabel('Cross-Validated Accuracy')
plt.grid(True)
plt.show()

# Multinomial naive Bayes classifier
nb = MultinomialNB()
nb_scores = cross_val_score(nb, X_train, y_train, cv=5)
print("Naive Bayes CV average score:", nb_scores.mean())

# Decision tree classifier: tune hyperparameters with a grid search
param_grid = {
    'max_depth': [None, 10, 20, 30],
    'min_samples_leaf': [1, 2, 4],
    'min_samples_split': [2, 5, 10]
}
dt = DecisionTreeClassifier()
grid_search = GridSearchCV(dt, param_grid, cv=5)
grid_search.fit(X_train, y_train)

best_params = grid_search.best_params_
print("Best decision tree parameters:", best_params)

dt_best = DecisionTreeClassifier(**best_params)
dt_best.fit(X_train, y_train)

# Decision tree visualization
# Note: rendering requires the Graphviz system binaries (dot) to be installed
dot_data = export_graphviz(dt_best, out_file=None,
                           feature_names=vectorizer.get_feature_names_out(),
                           class_names=le.classes_, filled=True)
graph = Source(dot_data)
graph.view()  # render and open the tree diagram

# Logistic regression classifier: search for the best regularization strength C
C_values = [0.01, 0.1, 1, 10, 100]
logistic_scores = []
for C in C_values:
    # A higher max_iter avoids convergence warnings on sparse text features
    lr = LogisticRegression(C=C, max_iter=1000)
    score = cross_val_score(lr, X_train, y_train, cv=5).mean()
    logistic_scores.append((score, C))

best_C = max(logistic_scores, key=lambda x: x[0])[1]
print(f"Optimal C value for Logistic Regression: {best_C}")

# Plot logistic regression accuracy against the regularization parameter C
plt.figure(figsize=(10, 6))
plt.plot(C_values, [score for score, _ in logistic_scores], marker='o', linestyle='-')
plt.title('Logistic Regression Accuracy vs. Regularization Parameter C')
plt.xlabel('Regularization Parameter C')
plt.ylabel('Cross-Validated Accuracy')
plt.xscale('log')
plt.grid(True)
plt.show()
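
Finally, classification_report is imported above but never used. As a minimal sketch of the evaluation step (picking the tuned decision tree here is an arbitrary illustrative choice; any of the models above could be scored the same way):

# Evaluate the tuned decision tree on the held-out test set
y_pred = dt_best.predict(X_test)
print(classification_report(y_test, y_pred, target_names=le.classes_.astype(str)))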