Problem: implement logistic regression ("对率回归") in code and evaluate it on the watermelon dataset.
Since the given dataset has only 17 samples, a single-layer network, which is exactly logistic regression trained by gradient descent, is used directly.
Logistic (log-odds) function: y = \frac{1}{1+e^{-(w^Tx+b)}}
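For reference, the cost minimized below is the average cross-entropy (negative log-likelihood) over the m training samples; its gradients are exactly what backward_propagation computes:

J(W,b) = -\frac{1}{m}\sum_{i=1}^{m}\left[y^{(i)}\log a^{(i)} + (1-y^{(i)})\log(1-a^{(i)})\right], \qquad a^{(i)} = \sigma(w^Tx^{(i)}+b)

\frac{\partial J}{\partial W} = \frac{1}{m}(A-Y)X^T, \qquad \frac{\partial J}{\partial b} = \frac{1}{m}\sum_{i=1}^{m}\left(a^{(i)}-y^{(i)}\right)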

#!/usr/bin/env python
# coding: utf-8

# In[80]:


import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split


# In[81]:


# Watermelon dataset 3.0α: columns are density, sugar content, label (1 = good, 0 = bad)
data = np.array([[0.697, 0.460, 1],
        [0.774, 0.376, 1],
        [0.634, 0.264, 1],
        [0.608, 0.318, 1],
        [0.556, 0.215, 1],
        [0.403, 0.237, 1],
        [0.481, 0.149, 1],
        [0.437, 0.211, 1],
        [0.666, 0.091, 0],
        [0.243, 0.267, 0],
        [0.245, 0.057, 0],
        [0.343, 0.099, 0],
        [0.639, 0.161, 0],
        [0.657, 0.198, 0],
        [0.360, 0.370, 0],
        [0.593, 0.042, 0],
        [0.719, 0.103, 0]])


# In[82]:


x = data[:, 0:2]  # features: density, sugar content
y = data[:, 2]    # labels


# In[83]:


train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.25, random_state=30)
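With only 17 samples, a 25% test split holds out just 5 examples. An optional tweak (not in the original) is to pass stratify=y so both splits keep roughly the same good/bad ratio:

# Optional alternative: stratified split preserves the 8:9 class ratio
train_x, test_x, train_y, test_y = train_test_split(
    x, y, test_size=0.25, random_state=30, stratify=y)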


# In[84]:


train_x = train_x.T               # features as columns: shape (2, m)
test_x = test_x.T
train_y = train_y.reshape(1, -1)  # labels as a row vector: shape (1, m)
test_y = test_y.reshape(1, -1)


# In[85]:


# Initialize parameters
def initialize_parameters():
    # Zeros are fine here: the logistic loss is convex, so there is no symmetry to break
    W = np.zeros((1, 2))
    b = 0
    
    return W, b


# In[86]:


def sigmoid(z):
    A = 1 / (1 + np.exp(-z))
    
    return A 
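
The z values produced on this dataset are small, but np.exp(-z) can overflow for large negative z. A numerically stable variant (an optional substitute, not in the original) is:

def sigmoid_stable(z):
    # Use e^z / (1 + e^z) for negative z so np.exp never overflows
    out = np.empty_like(z, dtype=float)
    pos = z >= 0
    out[pos] = 1 / (1 + np.exp(-z[pos]))
    ez = np.exp(z[~pos])
    out[~pos] = ez / (1 + ez)
    return out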


# In[87]:


# Forward propagation
def forward_propagation(W, b, X):
    Z = np.dot(W, X) + b 
    A = sigmoid(Z)
    
    return A 


# In[88]:


# Cost function: average cross-entropy
def compute_cost(A, Y):
    m = A.shape[1]
    cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))
    
    return cost
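
If a prediction saturates to exactly 0 or 1, np.log returns -inf and the cost becomes nan. A defensive variant (an optional tweak, not in the original) clips the activations first:

def compute_cost_safe(A, Y, eps=1e-12):
    # Keep A strictly inside (0, 1) so np.log never sees 0
    A = np.clip(A, eps, 1 - eps)
    m = A.shape[1]
    return -np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A)) / m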


# In[89]:


# Backward propagation
def backward_propagation(X, A, Y):
    m = A.shape[1]
    dW = 1 / m * np.dot(A - Y, X.T)                    # shape (1, 2)
    db = 1 / m * np.sum(A - Y, axis=1, keepdims=True)  # shape (1, 1)
    
    return dW, db
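
These analytic gradients can be sanity-checked against a central finite difference of the cost (a debugging aid, not part of the original; grad_check is a hypothetical helper name):

def grad_check(W, b, X, Y, i=0, j=0, h=1e-6):
    # Numerically estimate dJ/dW[i, j] and compare with backward_propagation
    Wp, Wm = W.copy(), W.copy()
    Wp[i, j] += h
    Wm[i, j] -= h
    numeric = (compute_cost(forward_propagation(Wp, b, X), Y)
               - compute_cost(forward_propagation(Wm, b, X), Y)) / (2 * h)
    analytic = backward_propagation(X, forward_propagation(W, b, X), Y)[0][i, j]
    return numeric, analytic  # the two should agree to ~1e-7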


# In[90]:


# Update parameters
def update_parameters(dW, db, W, b, learning_rate=0.001):
    W = W - learning_rate * dW
    b = b - learning_rate * db
    
    return W, b 


# In[128]:


def model(train_x, train_y, learning_rate=0.05, num_iterations=3000, print_cost=False):
    costs = []
    W, b = initialize_parameters()
    for i in range(num_iterations):
        A = forward_propagation(W, b, train_x)
        cost = compute_cost(A, train_y)
        dW, db = backward_propagation(train_x, A, train_y)
        W, b = update_parameters(dW, db, W, b, learning_rate)
        if i % 100 == 0:
            costs.append(cost)
            if print_cost:
                print("after {} iterations, the cost is {}".format(i, cost))
    
    parameters = {"W": W,
            "b": b}
    plt.plot(costs)
    plt.xlabel("iterations (x100)")  # cost is recorded every 100 iterations
    plt.ylabel("cost")
    plt.title("learning_rate = {}".format(learning_rate))
    return parameters


# In[129]:


parameters = model(train_x, train_y, learning_rate=0.05, num_iterations=3000, print_cost=True)


# In[130]:


# Compute accuracy
def score(parameters, X, Y):
    W = parameters["W"]
    b = parameters["b"]
    
    A = sigmoid(np.dot(W, X) + b)
    Y_predict = (A > 0.5).astype(float)  # threshold at 0.5 (a copy, so A is not modified in place)
    m = A.shape[1]
    
    score = 1 - np.sum(np.abs(Y_predict - Y)) / m
    print(score)


# In[131]:


score(parameters, train_x, train_y)


# In[132]:


score(parameters, test_x, test_y)
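
To visualize the fit, the learned boundary Wx + b = 0 can be drawn over the full dataset (a plotting sketch, not part of the original; it reuses x, y, and the parameters returned by model above):

# Scatter the two classes and draw the line w1*x1 + w2*x2 + b = 0
W = parameters["W"]
b = float(np.ravel(parameters["b"])[0])  # b may be a scalar or a (1, 1) array
plt.scatter(x[y == 1, 0], x[y == 1, 1], marker="o", label="good")
plt.scatter(x[y == 0, 0], x[y == 0, 1], marker="x", label="bad")
x1 = np.linspace(x[:, 0].min(), x[:, 0].max(), 100)
x2 = -(W[0, 0] * x1 + b) / W[0, 1]  # solve for x2 on the boundary
plt.plot(x1, x2)
plt.xlabel("density")
plt.ylabel("sugar content")
plt.legend()
plt.show()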
