date: 2021-12-21 01:27:40
tags: 机器学习

AdaBoost 实现 (AdaBoost implementation)

n_train, n_test = len(X_train), len(X_test) W = np.ones(n_train) / n_train #

Weak_clf.fit(X_train, Y_train, sample_weight=W)

# 预测不正确的样本数，计算精度 miss = [int(x) for x in (pred_train_i != Y_train)] #

# 计算alpha alpha = 0.5 * np.log(float(1 - miss_w) / float(miss_w + 0.01)) #

np.exp([float(x) * alpha for x in factor])) W = W / sum(W) # normalization

# predict pred_train_i = [1 if x == 1 else -1 for x in pred_train_i]
pred_test_i= [1 if x == 1 else -1 for x in pred_test_i] pred_test = pred_test +
np.multiply(alpha, pred_test_i)

pred_test = (pred_test > 0) * 1 # pred = (pred > 0) * 1 return pred_test

# Weak learner for boosting: a shallow (depth-2) decision tree using the
# entropy split criterion.
weak_clf = DecisionTreeClassifier(criterion='entropy', max_depth=2)

# 10-fold cross-validation of the hand-rolled AdaBoost.
acc = []
pre = []
rec = []
f1 = []
Data = data.copy()
kf = KFold(n_splits=10, shuffle=True, random_state=0)  # 10 folds
for train_index, test_index in tqdm(kf.split(Data)):
    # Split the data into the current train/test folds.
    train_data = Data[train_index]
    test_data = Data[test_index]
    # NOTE(review): assumes columns 0..7 are features and column 8 is the
    # 0/1 label — confirm against the dataset loading code.
    x_train = train_data[:, :8]
    y_train = train_data[:, 8]
    x_test = test_data[:, :8]
    y_test = test_data[:, 8]
    # Standardize features. BUG FIX: the scaler must be fitted on the
    # training fold only and then applied unchanged to the test fold;
    # the original refit it on x_test, which leaks test-set statistics
    # and makes the two transforms inconsistent.
    scaler = StandardScaler()
    scaler.fit(x_train)
    x_train = scaler.transform(x_train)
    x_test = scaler.transform(x_test)  # reuse the train-fitted scaler
    pred_test = my_adaboost(weak_clf, x_train, x_test, y_train, y_test, epoch)
    # Per-fold test-set accuracy, precision, recall and F1.
    acc.append(accuracy_score(y_test, pred_test))
    pre.append(precision_score(y_test, pred_test))
    rec.append(recall_score(y_test, pred_test))
    f1.append(f1_score(y_test, pred_test))
print("My Adaboost outcome in test set with {} epoch:".format(epoch))
print("ACC:", sum(acc) / 10)
print("PRE: ", sum(pre) / 10)
print("REC: ", sum(rec) / 10)
print("F1: ", sum(f1) / 10)

def my_adaboost(Weak_clf, X_train, X_test, Y_train, Y_test, Epoch):
    """AdaBoost built on top of a reweightable weak classifier.

    Signature reconstructed from the docstring parameter list and the call
    site (the scrape lost the first half of the ``def`` line).

    :param Weak_clf: weak learner exposing ``fit(X, y, sample_weight=...)``
        and ``predict(X)`` (e.g. a sklearn estimator)
    :param X_train: training feature matrix
    :param X_test: test feature matrix
    :param Y_train: training labels in {0, 1}
    :param Y_test: test labels (unused; kept for interface compatibility)
    :param Epoch: number of boosting rounds
    :return: array of 0/1 predictions for ``X_test``
    """
    n_train, n_test = len(X_train), len(X_test)
    W = np.ones(n_train) / n_train  # uniform initial sample weights
    pred_train, pred_test = np.zeros(n_train), np.zeros(n_test)
    for i in range(Epoch):
        # Fit the weak learner under the current sample weights.
        Weak_clf.fit(X_train, Y_train, sample_weight=W)
        # BUG FIX: predictions must come from the classifier passed in
        # (Weak_clf); the original called a module-level `weak_clf`,
        # silently ignoring the argument.
        pred_train_i = Weak_clf.predict(X_train)
        pred_test_i = Weak_clf.predict(X_test)
        # 1 for each misclassified training sample, 0 otherwise.
        miss = [int(x) for x in (pred_train_i != Y_train)]
        miss_w = np.dot(W, miss)  # weighted training error
        # Round coefficient; the +0.01 guards against division by zero
        # when the weak learner is perfect on the weighted sample.
        alpha = 0.5 * np.log(float(1 - miss_w) / float(miss_w + 0.01))
        # +1 for misclassified samples (up-weight), -1 for correct ones.
        factor = [x if x == 1 else -1 for x in miss]
        W = np.multiply(W, np.exp([float(x) * alpha for x in factor]))
        W = W / sum(W)  # renormalize to a probability distribution
        # Map {0,1} predictions to {-1,+1} and accumulate the weighted vote.
        pred_train_i = [1 if x == 1 else -1 for x in pred_train_i]
        pred_test_i = [1 if x == 1 else -1 for x in pred_test_i]
        pred_test = pred_test + np.multiply(alpha, pred_test_i)
        pred_train = pred_train + np.multiply(alpha, pred_train_i)
    # Sign of the aggregated vote, mapped back to {0,1}.
    pred_train = (pred_train > 0) * 1
    pred_test = (pred_test > 0) * 1
    return pred_test

GitHub

Gitee