On Wednesday, April 13, 2016, Kobe Bryant scored 60 points in his final game for the Los Angeles Lakers, marking his retirement from the NBA. Using 20 years of data on Kobe's makes and misses, can you predict which shots will find the bottom of the net? This lab walks through visualizing Kobe's shot data, including overall shooting accuracy, accuracy by season, field-goal percentage at different shot distances, whether the time remaining affects accuracy, field-goal percentage against different opponents, and the relationship between game period and field-goal percentage.
1. Import the libraries

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

2. Read the data

data = pd.read_csv("/data/shixunfiles/21979737119eb4fafd62cae509c0c571_1602468291676.csv")
3. Show the dataset size

print("Dataset size:", data.shape)

4. Dataset details

print(data.info())

# Count made vs. missed shots among the rows whose label is not null
yes = 0
no = 0
for i in data[pd.notnull(data['shot_made_flag'])]['shot_made_flag']:
    if i == 1.0:
        yes += 1
    else:
        no += 1
plt.bar([0, 1], [yes, no])
plt.xticks([0, 1])
plt.show()
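The counting loop above can also be written with pandas' built-in value_counts; a minimal equivalent sketch using only the column already in use:

# Same make/miss counts, computed directly by pandas
counts = data['shot_made_flag'].value_counts()   # index 1.0 = made shots, 0.0 = missed shots
print(counts)
counts.sort_index().plot(kind='bar')
plt.xticks([0, 1], ['missed (0)', 'made (1)'])
plt.show()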
5. Save the data

# Rows with a missing shot_made_flag are the unlabeled shots; keep them aside
# in data_no and use only the labeled rows for the analysis below
data_no = data[pd.isnull(data['shot_made_flag'])]
data = data[pd.notnull(data['shot_made_flag'])]
print(data.shape)

6. Create a figure and plot the data

plt.figure(figsize=(10, 10))

plt.subplot(1, 2, 1)
# alpha is the opacity; loc_x and loc_y are the court coordinates of each shot
plt.scatter(data.loc_x, data.loc_y, color='g', alpha=0.05)
plt.title('loc_x and loc_y')

plt.subplot(1, 2, 2)
# lat is the latitude, lon is the longitude
plt.scatter(data.lon, data.lat, color='b', alpha=0.05)
plt.title('lat and lon')
plt.show()

# Combine minutes and seconds remaining into a single feature, in seconds
data['remain_time'] = data['minutes_remaining'] * 60 + data['seconds_remaining']
data['remain_time'][:5]
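With remain_time in place, the question from the introduction of whether the time remaining affects accuracy can be checked with a quick groupby. A minimal sketch; the 10-second cutoff and the 30-second bin width are arbitrary illustrative choices, not from the original:

# Field-goal percentage for "clutch" shots (last 10 seconds of a period) vs. all other shots
clutch = data['remain_time'] <= 10                     # illustrative cutoff
print(data.groupby(clutch)['shot_made_flag'].mean())   # the True row is the final 10 seconds

# Accuracy as a function of seconds remaining, in 30-second bins
bins = pd.cut(data['remain_time'], bins=range(0, 721, 30), include_lowest=True)
data.groupby(bins)['shot_made_flag'].mean().plot(kind='bar', figsize=(14, 4))
plt.ylabel('FG%')
plt.show()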
import matplotlib.cm as cm

plt.figure(figsize=(20, 10))

# data.groupby(feature) splits the rows according to the categories in `feature`
def scatterbygroupby(feature):
    alpha = 0.1
    gb = data.groupby(feature)
    cl = cm.rainbow(np.linspace(0, 1, len(gb)))
    for g, c in zip(gb, cl):
        plt.scatter(g[1].loc_x, g[1].loc_y, color=c, alpha=alpha)

# Compare the three shot-zone encodings side by side
plt.subplot(1, 3, 1)
scatterbygroupby('shot_zone_basic')
plt.title('shot_zone_basic')

plt.subplot(1, 3, 2)
scatterbygroupby('shot_zone_range')
plt.title('shot_zone_range')

plt.subplot(1, 3, 3)
scatterbygroupby('shot_zone_area')
plt.title('shot_zone_area')
plt.show()
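The introduction also promises accuracy broken down by shot distance, by opponent, and by game period, but no such plots appear above. A hedged sketch of how they could be produced with the same groupby pattern (shot_distance, opponent, and period are standard columns of this dataset):

# Field-goal percentage by shot distance (feet), by opponent, and by period
fig, axes = plt.subplots(3, 1, figsize=(14, 12))

data.groupby('shot_distance')['shot_made_flag'].mean().plot(ax=axes[0])
axes[0].set_title('FG% by shot distance')

data.groupby('opponent')['shot_made_flag'].mean().sort_values().plot(kind='bar', ax=axes[1])
axes[1].set_title('FG% by opponent')

data.groupby('period')['shot_made_flag'].mean().plot(kind='bar', ax=axes[2])
axes[2].set_title('FG% by period')

plt.tight_layout()
plt.show()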
7. Drop irrelevant features such as the game id and shot id

drops = ['combined_shot_type', 'shot_id', 'team_id', 'team_name', 'shot_zone_area',
         'shot_zone_range', 'shot_zone_basic', 'matchup', 'lon', 'lat',
         'seconds_remaining', 'minutes_remaining', 'shot_distance', 'game_event_id',
         'game_id', 'game_date', 'season']
for drop in drops:
    data = data.drop(drop, axis=1)   # axis: 0 = rows, 1 = columns
data.head()

a = ['action_type', 'shot_type', 'opponent']
for i in a:
    # One-hot encode: every category value in column i becomes a new 0/1 column,
    # named with the prefix i followed by that category value
    data = pd.concat([data, pd.get_dummies(data[i], prefix=i)], axis=1)
    data = data.drop(i, axis=1)      # axis: 0 = rows, 1 = columns
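To make the effect of get_dummies concrete, here is a tiny standalone example; the toy frame is made up purely for illustration:

# get_dummies turns one categorical column into several indicator columns
toy = pd.DataFrame({'opponent': ['UTA', 'POR', 'UTA']})
print(pd.get_dummies(toy['opponent'], prefix='opponent'))
# columns: opponent_POR, opponent_UTA -- one indicator per category
# (values are 0/1 or True/False depending on the pandas version)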
data.head()

# Save the processed data and read it back
data.to_csv("./data_processed.csv", encoding="utf-8-sig", mode="w", header=True, index=False)
data = pd.read_csv("data_processed.csv")
data_label = data['shot_made_flag']

# Read the data again, then show its size and details
data = pd.read_csv("data_processed.csv")
print("Dataset size:", data.shape)
print(data.info())

# Split off the features and the label
data_feature = data.drop('shot_made_flag', axis=1)
data_label = data['shot_made_flag']
data_label.shape
data_label = np.array(data_label)
data_label.shape

8. Standardize the data

from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
data_feature = scaler.fit_transform(data_feature)
data_feature = pd.DataFrame(data_feature)
data_feature.head()

# Save the standardized features and read them back
data_feature.to_csv("./data_feature_standard.csv", encoding="utf-8-sig", mode="w", header=True, index=False)
data_feature = pd.read_csv("data_feature_standard.csv")
# Load the features and labels
knn_data = data_feature
knn_label = data_label

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(knn_data, knn_label,
                                                    random_state=2020, test_size=0.25)

from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train)

score = knn.score(X_train, y_train)
print('Training set accuracy: {:.3}%'.format(score * 100))
score = knn.score(X_test, y_test)
print('Test set accuracy: {:.3}%'.format(score * 100))

# This takes a while to run, please be patient
# Cross-validation
from sklearn.model_selection import cross_val_score
from time import time
import datetime

k_range = range(1, 21, 2)
cv_scores = []
time0 = time()

# Try each odd k from 1 to 19 and record its cross-validated accuracy
for n in k_range:
    print(n)
    knn = KNeighborsClassifier(n_neighbors=n)
    scores = cross_val_score(knn, X_train, y_train, cv=10, scoring='accuracy')
    cv_scores.append(scores.mean())

print('Time taken: %s' % (datetime.datetime.fromtimestamp(time() - time0).strftime("%M:%S:%f")))
print('Best accuracy:', max(cv_scores), ', corresponding k:', k_range[cv_scores.index(max(cv_scores))])
plt.plot(k_range, cv_scores)
plt.xlabel('K')
plt.ylabel('Accuracy')
plt.show()

# Narrow the search around the best k
k_range = range(17, 23, 2)
cv_scores = []
time0 = time()
for n in k_range:
    print(n)
    knn = KNeighborsClassifier(n_neighbors=n)
    scores = cross_val_score(knn, X_train, y_train, cv=10, scoring='accuracy')
    cv_scores.append(scores.mean())

print('Time taken: %s' % (datetime.datetime.fromtimestamp(time() - time0).strftime("%M:%S:%f")))
print('Best accuracy:', max(cv_scores), ', corresponding k:', k_range[cv_scores.index(max(cv_scores))])
plt.plot(k_range, cv_scores)
plt.xlabel('K')
plt.ylabel('Accuracy')
plt.show()
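The two manual search loops above can also be expressed with scikit-learn's GridSearchCV, which handles the looping, scoring, and bookkeeping in one call. A minimal equivalent sketch over the same search space:

from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier

# Odd k values, 10-fold cross-validation, accuracy scoring, as in the loops above
param_grid = {'n_neighbors': list(range(1, 23, 2))}
search = GridSearchCV(KNeighborsClassifier(), param_grid, cv=10, scoring='accuracy')
search.fit(X_train, y_train)

print('Best accuracy:', search.best_score_)
print('Best k:', search.best_params_['n_neighbors'])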
9. Evaluate on the test set

knn = KNeighborsClassifier(n_neighbors=19)
knn.fit(X_train, y_train)

score = knn.score(X_train, y_train)
print('Training set accuracy: {:.3}%'.format(score * 100))
score = knn.score(X_test, y_test)
print('Test set accuracy: {:.3}%'.format(score * 100))

# ROC curve
from sklearn.metrics import roc_curve, auc, roc_auc_score
from sklearn.preprocessing import label_binarize

# y_test_hot = label_binarize(y_test, classes=(0, 1))
knn_y_score = knn.predict_proba(X_test)
knn_fpr, knn_tpr, _ = roc_curve(y_test, knn_y_score[:, 1], pos_label=1)
plt.plot(knn_fpr, knn_tpr, label='micro-average ROC curve', color='g', linewidth=4)
plt.plot([0, 1], [0, 1], 'k--', lw=2, c='r')
plt.title('knn roc')
plt.show()
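The lab reports AUC only for the Keras model later on; for a like-for-like comparison, the KNN curve's AUC can be computed the same way. A minimal sketch using the arrays already defined above:

# Area under the KNN ROC curve (equivalently: roc_auc_score(y_test, knn_y_score[:, 1]))
knn_auc = auc(knn_fpr, knn_tpr)
print('KNN AUC:', knn_auc)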
# Prepare the data for the neural network (the cnn_ names are kept from the original,
# although the model below is a fully connected network, not a convolutional one)
cnn_data = np.asarray(data_feature)
cnn_label = np.asarray(data_label)
cnn_data.shape

x_train = cnn_data[:20000]
y_train = cnn_label[:20000]
x_test = cnn_data[20000:]
y_test = cnn_label[20000:]

from keras import models
from keras import layers

model = models.Sequential()
## Input layer, ReLU activation
model.add(layers.Dense(16, activation='relu', input_shape=(95,)))
## Hidden layer, ReLU activation
model.add(layers.Dense(16, activation='relu'))
## Output layer, a single unit with a sigmoid activation
model.add(layers.Dense(1, activation='sigmoid'))
model.summary()

model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])

# Hold out the first 7,000 training samples as a validation set
x_val = x_train[:7000]
partial_x_train = x_train[7000:]
y_val = y_train[:7000]
······
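The "······" above marks an omitted cell. A hedged sketch of how such a hold-out split is typically completed and used during training; partial_y_train and the epoch count are assumptions for illustration, not taken from the original:

# Assumed completion of the hold-out split (not shown in the original)
partial_y_train = y_train[7000:]

# Train on the remaining samples while monitoring the held-out validation set
history = model.fit(partial_x_train, partial_y_train,
                    epochs=20, batch_size=512,
                    validation_data=(x_val, y_val))
print(history.history.keys())   # e.g. loss, accuracy, val_loss, val_accuracy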
10. Try different hidden-layer sizes and epoch counts, and keep the parameters with the best result

# nums1, nums2, epochs, best_result and best_param come from an omitted cell:
# nums1/nums2 are candidate layer sizes, epochs is a list of candidate epoch counts,
# and best_result/best_param should be initialized before the loop (e.g. 0 and [])
for num1 in nums1:
    for num2 in nums2:
        for epoch in epochs:
            model = models.Sequential()
            model.add(layers.Dense(num1, activation='relu', input_shape=(95,)))
            model.add(layers.Dense(num2, activation='relu'))
            model.add(layers.Dense(1, activation='sigmoid'))
            model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
            history = model.fit(x_train, y_train, epochs=epoch, batch_size=512)
            results = model.evaluate(x_test, y_test)
            if best_result < results[1]:
                best_result = results[1]
                best_param = []
                best_param.append(num1)
                best_param.append(num2)
                best_param.append(epoch)

print('Best accuracy:', best_result)
print('Best parameters:', best_param)
# Retrain with the best parameters found above
model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(95,)))
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=14, batch_size=512)
results = model.evaluate(x_test, y_test)
results

model.save('cnn_model.h5')
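The history object returned by model.fit is captured but never inspected; plotting its loss and accuracy curves is a common way to check for over- or under-fitting. A minimal sketch (the metric key may be 'acc' instead of 'accuracy' on older Keras versions):

# Training loss and accuracy per epoch, taken from the History object
plt.figure(figsize=(10, 4))

plt.subplot(1, 2, 1)
plt.plot(history.history['loss'])
plt.title('training loss')
plt.xlabel('epoch')

plt.subplot(1, 2, 2)
plt.plot(history.history['accuracy'])   # use 'acc' on older Keras
plt.title('training accuracy')
plt.xlabel('epoch')

plt.show()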
# Load the saved model
from keras.models import load_model
model = load_model('cnn_model.h5')

# Confusion matrix
from sklearn.metrics import confusion_matrix

y_pred = model.predict(x_test)
y_pred = y_pred.reshape(-1)
# Threshold the sigmoid outputs at 0.5 to get hard 0/1 predictions
for i, pred in enumerate(y_pred):
    if pred > 0.5:
        y_pred[i] = 1.0
    else:
        y_pred[i] = 0.0
print(y_pred.shape)
print(y_test.shape)
print(confusion_matrix(y_test, y_pred))

# ROC
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve, auc, roc_auc_score

model_y_score = model.predict_proba(x_test)   # on newer Keras, use model.predict(x_test) instead
model_y_score = model_y_score.reshape(-1)
model_fpr, model_tpr, _ = roc_curve(y_test, model_y_score, pos_label=1)
plt.plot(model_fpr, model_tpr, label='micro-average ROC curve', color='g', linewidth=4)
plt.plot([0, 1], [0, 1], 'k--', lw=2, c='r')
plt.title('model roc')
plt.show()

# AUC
model_auc = auc(model_fpr, model_tpr)
model_auc
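As a final comparison, the two ROC curves already computed above (knn_fpr/knn_tpr and model_fpr/model_tpr) can be overlaid on one plot. Note that they come from different test splits (the scikit-learn split for KNN, the manual 20,000-row split for the Keras model), so the comparison is only indicative. A minimal sketch:

# Overlay the KNN and Keras ROC curves computed earlier
plt.plot(knn_fpr, knn_tpr, label='KNN (k=19)', color='g')
plt.plot(model_fpr, model_tpr, label='Dense network', color='b')
plt.plot([0, 1], [0, 1], 'k--', lw=2)
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC comparison')
plt.legend(loc='lower right')
plt.show()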
