
Code for the 4th Industrial Big Data Innovation Competition (self-written)

import tensorflow as tf
import matplotlib.pyplot as plt
%matplotlib inline
import pandas as pd
import numpy as np

# This is a regression problem; the approach follows the Boston housing price example.
# Data preprocessing
x_train = np.zeros((16600, 39))
y_train = np.zeros((16600, 3))
x_test = np.zeros((3953, 39))
df_x_train = pd.read_csv('my_train.csv')
df_x_test = pd.read_csv('my_test.csv')
df_y_train = pd.read_csv('label.csv')
df_x_train.drop('Id', axis=1, inplace=True)
df_x_test.drop('Id', axis=1, inplace=True)
df_y_train.drop('Id', axis=1, inplace=True)
x_train = np.array(df_x_train)  # note: the zero pre-allocations above are simply overwritten here
y_train = np.array(df_y_train)
x_test = np.array(df_x_test)
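Since the features are used straight out of the CSV files, their scales can differ widely. The Boston housing example this post refers to standardizes features before training, which usually stabilizes and speeds up convergence. A minimal sketch of that step, assuming the same x_train/x_test arrays as above (the training log below was produced without it):

# Sketch only: feature standardization, as in the Boston housing example.
# Statistics are computed on the training set only and then applied to the
# test set, to avoid leaking test information into training.
mean = x_train.mean(axis=0)
std = x_train.std(axis=0)
std[std == 0] = 1.0  # guard against constant columns
x_train = (x_train - mean) / std
x_test = (x_test - mean) / std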
# Model
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(units=16, activation='relu', input_shape=(x_train.shape[1],)))
model.add(tf.keras.layers.Dropout(0.1))
model.add(tf.keras.layers.Dense(units=10, activation='relu', kernel_initializer='normal'))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(units=3, activation='relu'))
model.add(tf.keras.layers.Dense(units=3, activation='linear'))  # linear activation: the usual choice for a regression output layer
print(model.summary())  # print the layer structure
model.compile(loss='mse',        # mean squared error loss
              optimizer='adam',  # optimizer
              metrics=["mae"])   # evaluation metric
history = model.fit(x_train, y_train,
                    epochs=20000,          # number of training epochs
                    batch_size=200,        # mini-batch size for gradient descent
                    verbose=2,             # 0: silent, 1: progress bar, 2: one line per epoch
                    validation_split=0.02  # fraction of training data held out for validation
                    )
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense (Dense)                (None, 16)                640       
_________________________________________________________________
dropout (Dropout)            (None, 16)                0         
_________________________________________________________________
dense_1 (Dense)              (None, 10)                170       
_________________________________________________________________
dropout_1 (Dropout)          (None, 10)                0         
_________________________________________________________________
dense_2 (Dense)              (None, 3)                 33        
_________________________________________________________________
dense_3 (Dense)              (None, 3)                 12        
=================================================================
Total params: 855
Trainable params: 855
Non-trainable params: 0
_________________________________________________________________
None
Epoch 1/20000
82/82 - 0s - loss: 49150.1914 - mae: 206.3641 - val_loss: 38809.3984 - val_mae: 163.0585
Epoch 2/20000
82/82 - 0s - loss: 36806.1641 - mae: 171.4661 - val_loss: 30192.1992 - val_mae: 149.0044
Epoch 3/20000
82/82 - 0s - loss: 28210.9570 - mae: 150.4639 - val_loss: 20814.5762 - val_mae: 126.8148
Epoch 4/20000
82/82 - 0s - loss: 19452.6250 - mae: 121.1621 - val_loss: 11826.9873 - val_mae: 94.2418
Epoch 5/20000
82/82 - 0s - loss: 11912.6367 - mae: 92.8348 - val_loss: 5986.2295 - val_mae: 60.5107
Epoch 6/20000
82/82 - 0s - loss: 7124.3657 - mae: 71.8077 - val_loss: 2901.4902 - val_mae: 36.6146
Epoch 7/20000
82/82 - 0s - loss: 4116.0684 - mae: 50.5229 - val_loss: 827.2214 - val_mae: 23.7817
Epoch 8/20000
82/82 - 0s - loss: 3336.4146 - mae: 43.7556 - val_loss: 791.5867 - val_mae: 22.3812
Epoch 9/20000
82/82 - 0s - loss: 3032.6638 - mae: 41.4055 - val_loss: 1049.2428 - val_mae: 27.2894
Epoch 10/20000
82/82 - 0s - loss: 2719.1489 - mae: 38.7898 - val_loss: 1240.1924 - val_mae: 31.4957
Epoch 11/20000
82/82 - 0s - loss: 2311.5171 - mae: 35.5665 - val_loss: 1227.0610 - val_mae: 33.5609
Epoch 12/20000
82/82 - 0s - loss: 2165.0784 - mae: 33.8525 - val_loss: 1012.7498 - val_mae: 31.1721
Epoch 13/20000
82/82 - 0s - loss: 1887.0238 - mae: 31.4864 - val_loss: 1390.1809 - val_mae: 37.0801
Epoch 14/20000
82/82 - 0s - loss: 1691.2812 - mae: 29.0540 - val_loss: 1707.7378 - val_mae: 41.1535
Epoch 15/20000
82/82 - 0s - loss: 1532.8098 - mae: 26.5527 - val_loss: 1939.7997 - val_mae: 43.7536
Epoch 16/20000
82/82 - 0s - loss: 1358.6038 - mae: 24.2644 - val_loss: 1993.3639 - val_mae: 44.2045
Epoch 17/20000
82/82 - 0s - loss: 1190.1053 - mae: 22.2104 - val_loss: 2374.3965 - val_mae: 48.0460
Epoch 18/20000
82/82 - 0s - loss: 1136.8602 - mae: 21.0991 - val_loss: 2669.3945 - val_mae: 50.7997
Epoch 19/20000
82/82 - 0s - loss: 1059.8214 - mae: 20.2722 - val_loss: 2935.2559 - val_mae: 53.1788
Epoch 20/20000
82/82 - 0s - loss: 1014.5886 - mae: 19.3228 - val_loss: 3280.3130 - val_mae: 56.1718
Epoch 21/20000
82/82 - 0s - loss: 962.3523 - mae: 18.3778 - val_loss: 3755.1272 - val_mae: 60.0751
Epoch 22/20000
82/82 - 0s - loss: 954.1680 - mae: 17.8203 - val_loss: 3934.1104 - val_mae: 61.4836
Epoch 23/20000
82/82 - 0s - loss: 881.3633 - mae: 16.7048 - val_loss: 4056.0300 - val_mae: 62.4287
Epoch 24/20000
82/82 - 0s - loss: 868.2583 - mae: 16.6115 - val_loss: 3914.4895 - val_mae: 61.3268
Epoch 25/20000
82/82 - 0s - loss: 836.7545 - mae: 16.1454 - val_loss: 4258.8887 - val_mae: 63.9680
Epoch 26/20000
82/82 - 0s - loss: 840.1174 - mae: 16.0608 - val_loss: 3932.4329 - val_mae: 61.4714
Epoch 27/20000
82/82 - 0s - loss: 864.1089 - mae: 16.3016 - val_loss: 3843.9194 - val_mae: 60.7707
Epoch 28/20000
82/82 - 0s - loss: 824.6353 - mae: 16.0842 - val_loss: 3639.5659 - val_mae: 59.1350
Epoch 29/20000
82/82 - 0s - loss: 800.4683 - mae: 15.9703 - val_loss: 4056.4714 - val_mae: 62.4295
Epoch 30/20000
82/82 - 0s - loss: 791.3029 - mae: 15.8931 - val_loss: 3875.6846 - val_mae: 61.0247
Epoch 31/20000
82/82 - 0s - loss: 806.9557 - mae: 15.9177 - val_loss: 3927.4924 - val_mae: 61.4339
Epoch 32/20000
82/82 - 0s - loss: 770.6181 - mae: 15.9284 - val_loss: 4141.4375 - val_mae: 63.0835
Epoch 33/20000
82/82 - 0s - loss: 765.0216 - mae: 15.6597 - val_loss: 3646.2839 - val_mae: 59.1921
Epoch 34/20000
82/82 - 0s - loss: 762.2557 - mae: 15.6613 - val_loss: 4062.6304 - val_mae: 62.4781
Epoch 35/20000
82/82 - 0s - loss: 761.6378 - mae: 15.7607 - val_loss: 4207.3052 - val_mae: 63.5839
Epoch 36/20000
82/82 - 0s - loss: 730.6315 - mae: 15.4276 - val_loss: 4226.1860 - val_mae: 63.7284
Epoch 37/20000
82/82 - 0s - loss: 729.6150 - mae: 15.3077 - val_loss: 4009.0208 - val_mae: 62.0652
Epoch 38/20000
82/82 - 0s - loss: 745.9162 - mae: 15.5609 - val_loss: 3945.7375 - val_mae: 61.5765
Epoch 39/20000



---------------------------------------------------------------------------

KeyboardInterrupt                         Traceback (most recent call last)

<ipython-input-6-934c7f8ecd18> in <module>
     22           batch_size=200,  # mini-batch size for gradient descent
     23           verbose=2,  # 0: silent, 1: progress bar, 2: one line per epoch
---> 24           validation_split=0.02  # fraction of training data held out for validation
     25         )


c:\users\25838\miniconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in _method_wrapper(self, *args, **kwargs)
    106   def _method_wrapper(self, *args, **kwargs):
    107     if not self._in_multi_worker_mode():  # pylint: disable=protected-access
--> 108       return method(self, *args, **kwargs)
    109 
    110     # Running inside `run_distribute_coordinator` already.


c:\users\25838\miniconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
   1096                 batch_size=batch_size):
   1097               callbacks.on_train_batch_begin(step)
-> 1098               tmp_logs = train_function(iterator)
   1099               if data_handler.should_sync:
   1100                 context.async_wait()


c:\users\25838\miniconda3\lib\site-packages\tensorflow\python\eager\def_function.py in __call__(self, *args, **kwds)
    778       else:
    779         compiler = "nonXla"
--> 780         result = self._call(*args, **kwds)
    781 
    782       new_tracing_count = self._get_tracing_count()


c:\users\25838\miniconda3\lib\site-packages\tensorflow\python\eager\def_function.py in _call(self, *args, **kwds)
    805       # In this case we have created variables on the first call, so we run the
    806       # defunned version which is guaranteed to never create variables.
--> 807       return self._stateless_fn(*args, **kwds)  # pylint: disable=not-callable
    808     elif self._stateful_fn is not None:
    809       # Release the lock early so that multiple threads can perform the call


c:\users\25838\miniconda3\lib\site-packages\tensorflow\python\eager\function.py in __call__(self, *args, **kwargs)
   2827     with self._lock:
   2828       graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
-> 2829     return graph_function._filtered_call(args, kwargs)  # pylint: disable=protected-access
   2830 
   2831   @property


c:\users\25838\miniconda3\lib\site-packages\tensorflow\python\eager\function.py in _filtered_call(self, args, kwargs, cancellation_manager)
   1846                            resource_variable_ops.BaseResourceVariable))],
   1847         captured_inputs=self.captured_inputs,
-> 1848         cancellation_manager=cancellation_manager)
   1849 
   1850   def _call_flat(self, args, captured_inputs, cancellation_manager=None):


c:\users\25838\miniconda3\lib\site-packages\tensorflow\python\eager\function.py in _call_flat(self, args, captured_inputs, cancellation_manager)
   1922       # No tape is watching; skip to running the function.
   1923       return self._build_call_outputs(self._inference_function.call(
-> 1924           ctx, args, cancellation_manager=cancellation_manager))
   1925     forward_backward = self._select_forward_and_backward_functions(
   1926         args,


c:\users\25838\miniconda3\lib\site-packages\tensorflow\python\eager\function.py in call(self, ctx, args, cancellation_manager)
    548               inputs=args,
    549               attrs=attrs,
--> 550               ctx=ctx)
    551         else:
    552           outputs = execute.execute_with_cancellation(


c:\users\25838\miniconda3\lib\site-packages\tensorflow\python\eager\execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
     58     ctx.ensure_initialized()
     59     tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 60                                         inputs, attrs, num_outputs)
     61   except core._NotOkStatusException as e:
     62     if name is not None:


KeyboardInterrupt: 
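The log above shows val_loss bottoming out around epoch 8 and then climbing while the training loss keeps falling, which is why training was cut off by hand (the KeyboardInterrupt above). A less manual alternative is Keras's EarlyStopping callback, which stops training automatically once the validation loss stops improving. A minimal sketch, using the same model and data as above (the patience value is an arbitrary choice, not from the original post):

# Sketch only: replace the manual interrupt with early stopping.
# restore_best_weights rolls the model back to the epoch with the lowest val_loss.
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                              patience=50,  # epochs to wait for improvement; tune as needed
                                              restore_best_weights=True)
history = model.fit(x_train, y_train,
                    epochs=20000,
                    batch_size=200,
                    verbose=2,
                    validation_split=0.02,
                    callbacks=[early_stop])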
# Mean absolute error (MAE): the absolute value of the difference between predictions and targets.
plt.plot(history.epoch, history.history.get("loss"), label="loss")
plt.plot(history.epoch, history.history.get("val_loss"), label="val_loss")
plt.legend()

plt.plot(history.epoch, history.history.get("mae"), label="mae")
plt.plot(history.epoch, history.history.get("val_mae"), label="val_mae")
plt.legend()

Original article: https://blog.csdn.net/wojiaoawenlong/article/details/108850773
