<>Logistic Regression: Principle and Formulas

$$\hat{y}=\sigma(w^{T}x+b),\quad \text{where } \sigma(z)=\frac{1}{1+e^{-z}},\quad w,x \in \mathbb{R}^{d}$$

$$P(\text{target}=1 \mid x_i)=\hat{y}_i$$

$$P(\text{target}=0 \mid x_i)=1-\hat{y}_i$$
The loss is the negative log-likelihood of the data; taking the log turns the product over samples into a sum and yields the binary cross-entropy:

$$\text{loss}=-\log\prod_{i}\hat{y}_i^{\,y_i}(1-\hat{y}_i)^{1-y_i}=-\sum_{i}\left[y_i\log(\hat{y}_i)+(1-y_i)\log(1-\hat{y}_i)\right]$$
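As a quick sanity check on the loss above, the formula can be compared against PyTorch's built-in binary cross-entropy. This is a minimal sketch (not part of the original post); the probability and label values are made up for illustration:

```python
import torch
import torch.nn.functional as F

y_hat = torch.tensor([0.9, 0.2, 0.7])  # hypothetical predicted probabilities
y = torch.tensor([1.0, 0.0, 1.0])      # hypothetical ground-truth labels

# Loss computed directly from the formula above (averaged over samples)
manual = -(y * torch.log(y_hat) + (1 - y) * torch.log(1 - y_hat)).mean()
# The same loss via PyTorch's built-in BCE (mean reduction by default)
builtin = F.binary_cross_entropy(y_hat, y)

print(manual.item(), builtin.item())  # both print the same value
```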

<>Code Implementation 1

Implementing the parameter update by hand.
```python
import torch

epochs = 100
lr = 0.001
n_feature = 2   # feature dimension
n_item = 1000   # number of samples

torch.manual_seed(123)

# Generate fake data
X = torch.randn(size=(n_item, n_feature)).float()
# Label is 1 if feature0 * 2 - feature1 * 3 > 1, otherwise 0
Y = torch.where(torch.sub(X[:, 0] * 2, X[:, 1] * 3) > 1,
                torch.tensor(1), torch.tensor(0))

class LogisticRegression():
    def __init__(self):
        # Initialize the model parameters
        self.w = torch.randn(size=(n_feature, 1), requires_grad=True)
        self.b = torch.zeros(size=(1, 1), requires_grad=True)

    def forward(self, x):
        # y_hat = sigmoid(w^T x + b)
        y_hat = torch.sigmoid(torch.matmul(self.w.transpose(0, 1), x) + self.b)
        return y_hat

    def loss_func(self, y_hat, y):
        return -(y * torch.log(y_hat) + (1 - y) * torch.log(1 - y_hat))

    def train(self):
        print('w :', self.w)
        print('b :', self.b)
        for epoch in range(epochs):
            avg_loss = 0
            for i in range(n_item):  # process one sample at a time
                y_hat = self.forward(X[i])
                loss = self.loss_func(y_hat, Y[i])
                avg_loss += loss.item()
                loss.backward()  # compute gradients
                with torch.no_grad():  # the updates below are not tracked by autograd
                    self.w.data -= lr * self.w.grad.data
                    self.b.data -= lr * self.b.grad.data
                    # Clear the gradients
                    self.w.grad.zero_()
                    self.b.grad.zero_()
            print('epoch : %d loss: %0.3f avg_loss: %0.3f'
                  % (epoch, loss.item(), avg_loss / n_item))
        print('w :', self.w)
        print('b :', self.b)

if __name__ == '__main__':
    lg_classifier = LogisticRegression()
    lg_classifier.train()
```
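To check what the manual loop actually learned, here is a small evaluation sketch of my own (not from the original post); it assumes X, Y, and the trained lg_classifier from the script above are still in scope, e.g. at the end of the __main__ block:

```python
with torch.no_grad():
    # Score all samples at once: sigmoid(X @ w + b) -> shape [n_item]
    y_hat = torch.sigmoid(torch.matmul(X, lg_classifier.w) + lg_classifier.b).squeeze()
    pred = (y_hat > 0.5).long()        # threshold the probabilities at 0.5
    acc = (pred == Y).float().mean()   # fraction of correctly classified samples
    print('accuracy: %0.3f' % acc.item())
```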
<>Code Implementation 2

Using the optimizer and loss function from torch.
```python
import torch
from torch.nn import Module
import torch.nn.functional as F

n_feature = 2
n_item = 1000
epochs = 100
lr = 0.001

X = torch.randn(size=(n_item, n_feature)).float()
Y = torch.where(torch.sub(X[:, 0] * 2, X[:, 1] * 3) > 1,
                torch.tensor(1), torch.tensor(0)).long()
Y = F.one_hot(Y)  # the argument to one_hot must be of type long
print('X:', X.shape)
print('Y:', Y.shape)

class BinaryClassificationModel(Module):
    def __init__(self):
        super().__init__()
        self.linear_1 = torch.nn.Linear(n_feature, 2)  # output dimension is 2

    def forward(self, x):
        """x: [batch_size, n_feature]"""
        output = self.linear_1(x)
        return torch.sigmoid(output)

model = BinaryClassificationModel()
# Set up the optimizer
optim = torch.optim.Adam(model.parameters(), lr=lr)
criteria = torch.nn.BCELoss()

# Print the parameters
for name, param in model.named_parameters():
    print(name, param.size(), param)

# Start training
for epoch in range(epochs):
    for i in range(n_item):  # treat each sample as its own batch
        # Clear the gradients
        optim.zero_grad()
        x = X[i].unsqueeze(0)  # add a batch dimension
        y = Y[i].unsqueeze(0).float()
        y_hat = model(x)
        loss = criteria(y_hat, y)
        loss.backward()  # compute gradients
        optim.step()     # update parameters
    print('epoch : %d loss : %0.3f' % (epoch, loss.item()))

# Print the parameters
for name, param in model.named_parameters():
    print(name, param.size(), param)
```
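A side note on the design: since the labels are binary, a common alternative to the 2-output + one_hot + BCELoss setup above is a single-logit model trained with torch.nn.BCEWithLogitsLoss, which fuses the sigmoid into the loss for numerical stability. A minimal sketch under that assumption (reusing X, n_feature, epochs, and lr from above, with Y as the plain 0/1 label vector from before the F.one_hot call):

```python
import torch

# Assumes X, n_feature, epochs, lr from the script above,
# and Y as the integer 0/1 labels (before one-hot encoding).
model = torch.nn.Linear(n_feature, 1)                # a single logit per sample
optim = torch.optim.Adam(model.parameters(), lr=lr)
criteria = torch.nn.BCEWithLogitsLoss()              # sigmoid + BCE fused in one op

for epoch in range(epochs):
    optim.zero_grad()
    logits = model(X).squeeze(1)        # full-batch forward pass, shape [n_item]
    loss = criteria(logits, Y.float())  # targets are plain 0/1 floats
    loss.backward()
    optim.step()
    print('epoch : %d loss : %0.3f' % (epoch, loss.item()))
```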
