Single input


# wide & deep
# wide (sparse features): direct cross-products of discrete feature values can represent every combination in the samples, which risks overfitting -- it essentially memorizes each sample
# deep (dense features): each token is represented by an embedding vector; the distance between vectors reflects how different the tokens are, i.e. how different the information they carry is

# Functional API
input = keras.layers.Input(shape=x_train.shape[1:])  # the input feature vector, one row per sample; a 28*28 image would be flattened to 1*784

hidden1 = keras.layers.Dense(30, activation='relu')(input)
hidden2 = keras.layers.Dense(30, activation='relu')(hidden1)

# concatenate hidden2 (deep branch) with the raw input (wide branch)
concat = keras.layers.concatenate([input, hidden2])

output = keras.layers.Dense(1)(concat)
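To make the wide-vs-deep contrast in the comments above concrete, here is a minimal toy sketch (hypothetical data, not part of the original tutorial) of the two feature styles:

import numpy as np

colors = ['red', 'green']   # discrete feature A
sizes = ['S', 'M', 'L']     # discrete feature B

# wide: one indicator per (color, size) pair -> 2 * 3 = 6 cross features,
# enough capacity to memorize every combination seen in training
cross_features = [(c, s) for c in colors for s in sizes]
print(len(cross_features))  # 6

# deep: each discrete value maps to a small dense vector; similar values get
# nearby vectors, so the model can generalize to pairs it has never seen
embedding = {v: np.random.randn(4) for v in colors + sizes}
x = np.concatenate([embedding['red'], embedding['M']])  # dense input of length 8
print(x.shape)  # (8,)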

# House-price prediction, a regression problem (unlike classification, individual feature values contribute to the target to different degrees)
####################################################################################

import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf

from tensorflow import keras

print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)
####################################################################################
from sklearn.datasets import fetch_california_housing

housing = fetch_california_housing()
print(housing.DESCR)
print(housing.data.shape)
print(housing.target.shape)
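
Optionally, a quick look at the raw data (a small sketch; housing.feature_names is part of the sklearn Bunch):

df = pd.DataFrame(housing.data, columns=housing.feature_names)
print(df.head())           # first 5 rows of the 8 features
print(housing.target[:5])  # median house values, in units of $100,000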


####################################################################################
from sklearn.model_selection import train_test_split

x_train_all, x_test, y_train_all, y_test = train_test_split(
    housing.data, housing.target, random_state = 7)
x_train, x_valid, y_train, y_valid = train_test_split(
    x_train_all, y_train_all, random_state = 11)
print(x_train.shape, y_train.shape)
print(x_valid.shape, y_valid.shape)
print(x_test.shape, y_test.shape)
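With the default test_size=0.25, the 20640 samples split into 11610 for training, 3870 for validation, and 5160 for testing.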

####################################################################################
from sklearn.preprocessing import StandardScaler
# Standardization (zero mean, unit variance); fit the scaler on the training set only,
# then reuse the same statistics for the validation and test data
scaler = StandardScaler()
x_train_scaled = scaler.fit_transform(x_train)
x_valid_scaled = scaler.transform(x_valid)
x_test_scaled = scaler.transform(x_test)
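
A quick optional sanity check: after fitting, each training column should have roughly zero mean and unit variance.

import numpy as np

print(np.round(x_train_scaled.mean(axis=0), 3))  # ~0 for every column
print(np.round(x_train_scaled.std(axis=0), 3))   # ~1 for every column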

####################################################################################

# wide & deep
# wide (sparse features): direct cross-products of discrete feature values can represent every combination in the samples, which risks overfitting -- it essentially memorizes each sample
# deep (dense features): each token is represented by an embedding vector; the distance between vectors reflects how different the tokens are, i.e. how different the information they carry is

# Functional API
input = keras.layers.Input(shape=x_train.shape[1:])  # the input feature vector, flattened to 1-D (one row per sample); a 28*28 image would become 1*784

hidden1 = keras.layers.Dense(30, activation='relu')(input)
hidden2 = keras.layers.Dense(30, activation='relu')(hidden1)

# concatenate hidden2 (deep branch) with the raw input (wide branch)
concat = keras.layers.concatenate([input, hidden2])
output = keras.layers.Dense(1)(concat)

model = keras.models.Model(inputs = [input],
                           outputs = [output])

model.summary()
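
In the summary, concatenate joins the 8 raw features with the 30 units of hidden2, so the final Dense(1) layer sees 38 inputs and has 39 parameters (38 weights plus a bias).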
####################################################################################

model.compile(loss="mean_squared_error", optimizer="sgd")
# stop once val_loss (the default monitor) fails to improve by at least 1e-2
# for 5 consecutive epochs
callbacks = [keras.callbacks.EarlyStopping(
    patience=5, min_delta=1e-2)]


####################################################################################
history = model.fit(x_train_scaled, y_train,
                    validation_data = (x_valid_scaled, y_valid),
                    epochs = 100,
                    callbacks = callbacks)

####################################################################################

def plot_learning_curves(history):
    pd.DataFrame(history.history).plot(figsize=(8, 5))
    plt.grid(True)
    plt.gca().set_ylim(0, 1)
    plt.show()
plot_learning_curves(history)

####################################################################################

model.evaluate(x_test_scaled, y_test)

Multi-input


# Multi-input
# wide branch: 5 features
input_wide = keras.layers.Input(shape=[5])

# deep branch: 6 features
input_deep = keras.layers.Input(shape=[6])

hidden1 = keras.layers.Dense(30, activation='relu')(input_deep)
hidden2 = keras.layers.Dense(30, activation='relu')(hidden1)
concat = keras.layers.concatenate([input_wide, hidden2])
output = keras.layers.Dense(1)(concat)
model = keras.models.Model(inputs = [input_wide, input_deep],
                           outputs = [output])


# House-price prediction, a regression problem (unlike classification, individual feature values contribute to the target to different degrees)
####################################################################################

import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf

from tensorflow import keras

print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)
####################################################################################
from sklearn.datasets import fetch_california_housing

housing = fetch_california_housing()
print(housing.DESCR)
print(housing.data.shape)
print(housing.target.shape)


####################################################################################
from sklearn.model_selection import train_test_split

x_train_all, x_test, y_train_all, y_test = train_test_split(
    housing.data, housing.target, random_state = 7)
x_train, x_valid, y_train, y_valid = train_test_split(
    x_train_all, y_train_all, random_state = 11)
print(x_train.shape, y_train.shape)
print(x_valid.shape, y_valid.shape)
print(x_test.shape, y_test.shape)

####################################################################################
from sklearn.preprocessing import StandardScaler
# Standardization (zero mean, unit variance); fit on the training set only
scaler = StandardScaler()
x_train_scaled = scaler.fit_transform(x_train)
x_valid_scaled = scaler.transform(x_valid)
x_test_scaled = scaler.transform(x_test)


####################################################################################

# wide & deep
# wide (sparse features): direct cross-products of discrete feature values can represent every combination in the samples, which risks overfitting -- it essentially memorizes each sample
# deep (dense features): each token is represented by an embedding vector; the distance between vectors reflects how different the tokens are, i.e. how different the information they carry is


# Multi-input
# wide branch: 5 features
input_wide = keras.layers.Input(shape=[5])

# deep branch: 6 features
input_deep = keras.layers.Input(shape=[6])

hidden1 = keras.layers.Dense(30, activation='relu')(input_deep)
hidden2 = keras.layers.Dense(30, activation='relu')(hidden1)
concat = keras.layers.concatenate([input_wide, hidden2])
output = keras.layers.Dense(1)(concat)
model = keras.models.Model(inputs = [input_wide, input_deep],
                           outputs = [output])



####################################################################################

model.compile(loss="mean_squared_error", optimizer="sgd")
callbacks = [keras.callbacks.EarlyStopping(
    patience=5, min_delta=1e-2)]


model.summary()

####################################################################################
# With the network structure defined above, prepare the matching data
x_train_scaled_wide = x_train_scaled[:, :5]  # input_wide: the first 5 features (columns 0-4) of every sample
x_train_scaled_deep = x_train_scaled[:, 2:]  # input_deep: the last 6 features (columns 2-7); columns 2-4 feed both branches

x_valid_scaled_wide = x_valid_scaled[:, :5]
x_valid_scaled_deep = x_valid_scaled[:, 2:]

x_test_scaled_wide = x_test_scaled[:, :5]
x_test_scaled_deep = x_test_scaled[:, 2:]
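
The two arrays must be passed to fit in the same order as inputs=[input_wide, input_deep]. A quick optional shape check:

print(x_train_scaled_wide.shape, x_train_scaled_deep.shape)  # (11610, 5) (11610, 6)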

history = model.fit([x_train_scaled_wide, x_train_scaled_deep],
                    y_train,
                    validation_data = (
                        [x_valid_scaled_wide, x_valid_scaled_deep],
                        y_valid),
                    epochs = 100,
                    callbacks = callbacks)


####################################################################################

def plot_learning_curves(history):
    pd.DataFrame(history.history).plot(figsize=(8, 5))
    plt.grid(True)
    plt.gca().set_ylim(0, 1)
    plt.show()
plot_learning_curves(history)

####################################################################################

model.evaluate([x_test_scaled_wide, x_test_scaled_deep], y_test)
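
Prediction follows the same multi-input convention (a usage sketch):

y_pred = model.predict([x_test_scaled_wide, x_test_scaled_deep])
print(y_pred.shape)  # (5160, 1)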

Model: "model"
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to
==================================================================================================
input_2 (InputLayer)            [(None, 6)]          0
__________________________________________________________________________________________________
dense (Dense)                   (None, 30)           210         input_2[0][0]
__________________________________________________________________________________________________
input_1 (InputLayer)            [(None, 5)]          0
__________________________________________________________________________________________________
dense_1 (Dense)                 (None, 30)           930         dense[0][0]
__________________________________________________________________________________________________
concatenate (Concatenate)       (None, 35)           0           input_1[0][0]
                                                                 dense_1[0][0]
__________________________________________________________________________________________________
dense_2 (Dense)                 (None, 1)            36          concatenate[0][0]
==================================================================================================
Total params: 1,176
Trainable params: 1,176
Non-trainable params: 0
__________________________________________________________________________________________________
Train on 11610 samples, validate on 3870 samples
Epoch 1/100
2019-12-08 12:44:46.126064: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library cublas64_100.dll
11610/11610 [==============================] - 3s 254us/sample - loss: 0.8408 - val_loss: 0.6333
Epoch 2/100
11610/11610 [==============================] - 1s 109us/sample - loss: 0.5304 - val_loss: 0.5133
Epoch 3/100
11610/11610 [==============================] - 1s 115us/sample - loss: 0.4671 - val_loss: 0.4912
Epoch 4/100
11610/11610 [==============================] - 1s 116us/sample - loss: 0.4533 - val_loss: 0.4662
Epoch 5/100
11610/11610 [==============================] - 1s 115us/sample - loss: 0.4304 - val_loss: 0.4485
Epoch 6/100
11610/11610 [==============================] - 1s 116us/sample - loss: 0.4210 - val_loss: 0.4361
Epoch 7/100
11610/11610 [==============================] - 1s 118us/sample - loss: 0.4156 - val_loss: 0.4280
Epoch 8/100
11610/11610 [==============================] - 1s 117us/sample - loss: 0.4061 - val_loss: 0.4369
Epoch 9/100
11610/11610 [==============================] - 1s 116us/sample - loss: 0.3984 - val_loss: 0.4191
Epoch 10/100
11610/11610 [==============================] - 1s 118us/sample - loss: 0.3924 - val_loss: 0.4056
Epoch 11/100
11610/11610 [==============================] - 1s 116us/sample - loss: 0.3888 - val_loss: 0.4018
Epoch 12/100
11610/11610 [==============================] - 1s 114us/sample - loss: 0.3822 - val_loss: 0.3972
Epoch 13/100
11610/11610 [==============================] - 1s 114us/sample - loss: 0.3767 - val_loss: 0.3950
Epoch 14/100
11610/11610 [==============================] - 1s 115us/sample - loss: 0.3744 - val_loss: 0.3809
Epoch 15/100
11610/11610 [==============================] - 1s 115us/sample - loss: 0.3674 - val_loss: 0.4122
Epoch 16/100
11610/11610 [==============================] - 1s 116us/sample - loss: 0.3644 - val_loss: 0.3731
Epoch 17/100
11610/11610 [==============================] - 1s 121us/sample - loss: 0.3604 - val_loss: 0.3702
Epoch 18/100
11610/11610 [==============================] - 1s 118us/sample - loss: 0.3549 - val_loss: 0.3677
Epoch 19/100
11610/11610 [==============================] - 1s 120us/sample - loss: 0.3539 - val_loss: 0.3631
Epoch 20/100
11610/11610 [==============================] - 1s 122us/sample - loss: 0.3498 - val_loss: 0.3559
Epoch 21/100
11610/11610 [==============================] - 1s 120us/sample - loss: 0.3481 - val_loss: 0.3611
Epoch 22/100
11610/11610 [==============================] - 1s 122us/sample - loss: 0.3426 - val_loss: 0.3495
Epoch 23/100
11610/11610 [==============================] - 1s 121us/sample - loss: 0.3406 - val_loss: 0.3872
Epoch 24/100
11610/11610 [==============================] - 1s 119us/sample - loss: 0.3385 - val_loss: 0.3433
Epoch 25/100
11610/11610 [==============================] - 1s 118us/sample - loss: 0.3399 - val_loss: 0.3449
Epoch 26/100
11610/11610 [==============================] - 1s 114us/sample - loss: 0.3330 - val_loss: 0.3446
Epoch 27/100
11610/11610 [==============================] - 1s 124us/sample - loss: 0.3314 - val_loss: 0.3592
Epoch 28/100
11610/11610 [==============================] - 1s 122us/sample - loss: 0.3326 - val_loss: 0.3382
Epoch 29/100
11610/11610 [==============================] - 1s 118us/sample - loss: 0.3323 - val_loss: 0.3502


Multi-output




# House-price prediction, a regression problem (unlike classification, individual feature values contribute to the target to different degrees)
####################################################################################

import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf

from tensorflow import keras

print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)
####################################################################################
from sklearn.datasets import fetch_california_housing

housing = fetch_california_housing()
print(housing.DESCR)
print(housing.data.shape)
print(housing.target.shape)


####################################################################################
from sklearn.model_selection import train_test_split

x_train_all, x_test, y_train_all, y_test = train_test_split(
    housing.data, housing.target, random_state = 7)
x_train, x_valid, y_train, y_valid = train_test_split(
    x_train_all, y_train_all, random_state = 11)
print(x_train.shape, y_train.shape)
print(x_valid.shape, y_valid.shape)
print(x_test.shape, y_test.shape)

####################################################################################
from sklearn.preprocessing import StandardScaler
# Standardization (zero mean, unit variance); fit on the training set only
scaler = StandardScaler()
x_train_scaled = scaler.fit_transform(x_train)
x_valid_scaled = scaler.transform(x_valid)
x_test_scaled = scaler.transform(x_test)


####################################################################################

# wide & deep
# wide (sparse features): direct cross-products of discrete feature values can represent every combination in the samples, which risks overfitting -- it essentially memorizes each sample
# deep (dense features): each token is represented by an embedding vector; the distance between vectors reflects how different the tokens are, i.e. how different the information they carry is


# Multi-input
# wide branch: 5 features
input_wide = keras.layers.Input(shape=[5])

# deep branch: 6 features
input_deep = keras.layers.Input(shape=[6])

hidden1 = keras.layers.Dense(30, activation='relu')(input_deep)
hidden2 = keras.layers.Dense(30, activation='relu')(hidden1)
concat = keras.layers.concatenate([input_wide, hidden2])

output = keras.layers.Dense(1)(concat)    # main output: wide + deep
output2 = keras.layers.Dense(1)(hidden2)  # auxiliary output, fed by the deep branch only

model = keras.models.Model(inputs = [input_wide, input_deep],
                           outputs = [output,output2])



####################################################################################

model.compile(loss="mean_squared_error", optimizer="sgd")
callbacks = [keras.callbacks.EarlyStopping(
    patience=5, min_delta=1e-2)]
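
With a single loss string, Keras applies mean squared error to each output and sums them into the total loss. Optionally (an assumption, not in the original code), the two heads can be weighted differently, e.g. to down-weight the auxiliary deep-only head:

model.compile(loss="mean_squared_error",
              optimizer="sgd",
              loss_weights=[0.9, 0.1])  # main output vs. auxiliary output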


model.summary()
####################################################################################
x_train_scaled_wide = x_train_scaled[:, :5]
x_train_scaled_deep = x_train_scaled[:, 2:]

x_valid_scaled_wide = x_valid_scaled[:, :5]
x_valid_scaled_deep = x_valid_scaled[:, 2:]

x_test_scaled_wide = x_test_scaled[:, :5]
x_test_scaled_deep = x_test_scaled[:, 2:]





history = model.fit([x_train_scaled_wide, x_train_scaled_deep],
                    [y_train, y_train],
                    validation_data = (
                        [x_valid_scaled_wide, x_valid_scaled_deep],
                        [y_valid, y_valid]),
                    epochs = 100,
                    callbacks = callbacks)
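
With two outputs, history.history records the total loss plus one entry per head (dense_2_loss and dense_3_loss in the log below), and likewise for the validation metrics.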

####################################################################################

def plot_learning_curves(history):
    pd.DataFrame(history.history).plot(figsize=(8, 5))
    plt.grid(True)
    plt.gca().set_ylim(0, 1)  # note: the multi-output total loss starts above 1, so its first epochs are clipped here
    plt.show()
plot_learning_curves(history)

####################################################################################

model.evaluate([x_test_scaled_wide, x_test_scaled_deep], [y_test, y_test])
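
For a two-output model, evaluate returns the total loss followed by each head's individual loss.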

Model: "model"
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to
==================================================================================================
input_2 (InputLayer)            [(None, 6)]          0
__________________________________________________________________________________________________
dense (Dense)                   (None, 30)           210         input_2[0][0]
__________________________________________________________________________________________________
input_1 (InputLayer)            [(None, 5)]          0
__________________________________________________________________________________________________
dense_1 (Dense)                 (None, 30)           930         dense[0][0]
__________________________________________________________________________________________________
concatenate (Concatenate)       (None, 35)           0           input_1[0][0]
                                                                 dense_1[0][0]
__________________________________________________________________________________________________
dense_2 (Dense)                 (None, 1)            36          concatenate[0][0]
__________________________________________________________________________________________________
dense_3 (Dense)                 (None, 1)            31          dense_1[0][0]
==================================================================================================
Total params: 1,207
Trainable params: 1,207
Non-trainable params: 0
__________________________________________________________________________________________________
Train on 11610 samples, validate on 3870 samples
Epoch 1/100
2019-12-08 12:55:06.788012: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library cublas64_100.dll
11610/11610 [==============================] - 4s 352us/sample - loss: 1.7991 - dense_2_loss: 0.8205 - dense_3_loss: 0.9784 - val_loss: 1.2087 - val_dense_2_loss: 0.5069 - val_dense_3_loss: 0.7016
Epoch 2/100
11610/11610 [==============================] - 2s 147us/sample - loss: 1.0620 - dense_2_loss: 0.4562 - dense_3_loss: 0.6059 - val_loss: 1.0322 - val_dense_2_loss: 0.4493 - val_dense_3_loss: 0.5827
Epoch 3/100
11610/11610 [==============================] - 2s 145us/sample - loss: 1.0405 - dense_2_loss: 0.4716 - dense_3_loss: 0.5690 - val_loss: 1.0633 - val_dense_2_loss: 0.4478 - val_dense_3_loss: 0.6153
Epoch 4/100
11610/11610 [==============================] - 2s 144us/sample - loss: 0.9364 - dense_2_loss: 0.4139 - dense_3_loss: 0.5222 - val_loss: 0.9468 - val_dense_2_loss: 0.4177 - val_dense_3_loss: 0.5290
Epoch 5/100
11610/11610 [==============================] - 2s 145us/sample - loss: 0.8777 - dense_2_loss: 0.3906 - dense_3_loss: 0.4870 - val_loss: 0.8977 - val_dense_2_loss: 0.4018 - val_dense_3_loss: 0.4957
Epoch 6/100
11610/11610 [==============================] - 2s 147us/sample - loss: 0.8543 - dense_2_loss: 0.3845 - dense_3_loss: 0.4694 - val_loss: 0.9032 - val_dense_2_loss: 0.4034 - val_dense_3_loss: 0.4997
Epoch 7/100
11610/11610 [==============================] - 2s 155us/sample - loss: 0.8293 - dense_2_loss: 0.3747 - dense_3_loss: 0.4548 - val_loss: 0.8396 - val_dense_2_loss: 0.3772 - val_dense_3_loss: 0.4622
Epoch 8/100
11610/11610 [==============================] - 2s 155us/sample - loss: 0.8050 - dense_2_loss: 0.3651 - dense_3_loss: 0.4397 - val_loss: 1.2402 - val_dense_2_loss: 0.7241 - val_dense_3_loss: 0.5158
Epoch 9/100
11610/11610 [==============================] - 2s 152us/sample - loss: 0.8028 - dense_2_loss: 0.3667 - dense_3_loss: 0.4361 - val_loss: 0.8167 - val_dense_2_loss: 0.3735 - val_dense_3_loss: 0.4431
Epoch 10/100
11610/11610 [==============================] - 2s 151us/sample - loss: 0.7761 - dense_2_loss: 0.3551 - dense_3_loss: 0.4213 - val_loss: 0.7896 - val_dense_2_loss: 0.3621 - val_dense_3_loss: 0.4273
Epoch 11/100
11610/11610 [==============================] - 2s 149us/sample - loss: 0.7621 - dense_2_loss: 0.3481 - dense_3_loss: 0.4137 - val_loss: 0.7703 - val_dense_2_loss: 0.3526 - val_dense_3_loss: 0.4176
Epoch 12/100
11610/11610 [==============================] - 2s 146us/sample - loss: 0.7544 - dense_2_loss: 0.3444 - dense_3_loss: 0.4100 - val_loss: 0.7740 - val_dense_2_loss: 0.3561 - val_dense_3_loss: 0.4178
Epoch 13/100
11610/11610 [==============================] - 2s 151us/sample - loss: 0.7446 - dense_2_loss: 0.3417 - dense_3_loss: 0.4028 - val_loss: 0.7511 - val_dense_2_loss: 0.3449 - val_dense_3_loss: 0.4060
Epoch 14/100
11610/11610 [==============================] - 2s 151us/sample - loss: 0.7382 - dense_2_loss: 0.3382 - dense_3_loss: 0.3999 - val_loss: 0.8029 - val_dense_2_loss: 0.3747 - val_dense_3_loss: 0.4281
Epoch 15/100
11610/11610 [==============================] - 2s 154us/sample - loss: 0.7337 - dense_2_loss: 0.3361 - dense_3_loss: 0.3978 - val_loss: 0.7656 - val_dense_2_loss: 0.3520 - val_dense_3_loss: 0.4134
Epoch 16/100
11610/11610 [==============================] - 2s 151us/sample - loss: 0.7284 - dense_2_loss: 0.3340 - dense_3_loss: 0.3943 - val_loss: 0.7299 - val_dense_2_loss: 0.3359 - val_dense_3_loss: 0.3939
Epoch 17/100
11610/11610 [==============================] - 2s 154us/sample - loss: 0.7208 - dense_2_loss: 0.3313 - dense_3_loss: 0.3895 - val_loss: 0.7223 - val_dense_2_loss: 0.3325 - val_dense_3_loss: 0.3897
Epoch 18/100
11610/11610 [==============================] - 2s 154us/sample - loss: 0.7197 - dense_2_loss: 0.3302 - dense_3_loss: 0.3894 - val_loss: 0.7287 - val_dense_2_loss: 0.3345 - val_dense_3_loss: 0.3941
Epoch 19/100
11610/11610 [==============================] - 2s 152us/sample - loss: 0.7352 - dense_2_loss: 0.3352 - dense_3_loss: 0.4000 - val_loss: 0.7341 - val_dense_2_loss: 0.3374 - val_dense_3_loss: 0.3966
Epoch 20/100
11610/11610 [==============================] - 2s 145us/sample - loss: 0.7095 - dense_2_loss: 0.3266 - dense_3_loss: 0.3829 - val_loss: 0.7415 - val_dense_2_loss: 0.3405 - val_dense_3_loss: 0.4008
Epoch 21/100
11610/11610 [==============================] - 2s 144us/sample - loss: 0.7100 - dense_2_loss: 0.3272 - dense_3_loss: 0.3829 - val_loss: 0.7808 - val_dense_2_loss: 0.3503 - val_dense_3_loss: 0.4305

