07_deeplearning_RELU_function
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense

model = Sequential([
    Dense(units=25, activation='sigmoid'),
    Dense(units=15, activation='sigmoid'),
    Dense(units=1, activation='sigmoid'),
])  # construct the neural network

from tensorflow.keras.losses import BinaryCrossentropy
model.compile(loss=BinaryCrossentropy())  # choose the loss function
model.fit(X, Y, epochs=100)  # epochs: number of passes over the training data during gradient descent
# model.fit(): run gradient descent to minimize the cost function J(W,B)
model.compile(loss=...)  # used to choose the loss function
model.fit(X, Y, epochs=...)  # used to train on the data, i.e. minimize J(w,b)
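As a minimal sketch (X_new here is a hypothetical array of new examples with the same features as X), the trained model can then make predictions; the sigmoid output is a probability that we threshold at 0.5 to get a 0/1 label:
predictions = model.predict(X_new)        # probabilities from the sigmoid output layer
yhat = (predictions >= 0.5).astype(int)   # threshold at 0.5 to get 0/1 labels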
ReLU (Rectified Linear Unit): g(z) = max(0, z).
When the quantity a unit models can only take non-negative values (z >= 0) but is not bounded between 0 and 1, ReLU is the natural choice instead of sigmoid.
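As a quick NumPy sketch of the two activation functions:
import numpy as np
def relu(z):
    return np.maximum(0, z)        # g(z) = max(0, z): 0 for z < 0, unbounded above
def sigmoid(z):
    return 1 / (1 + np.exp(-z))    # g(z) = 1 / (1 + e^(-z)): squashed between 0 and 1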
So what activation function should we use? For the output layer it depends on the target: sigmoid for binary classification, linear if the target can be negative, ReLU if it is non-negative. For the hidden layers, ReLU is the common default, as in the model below.
from tensorflow.keras.layers import Dense
model = Sequential([
    Dense(units=25, activation='relu'),
    Dense(units=15, activation='relu'),
    Dense(units=1, activation='sigmoid')
])
We should never use the linear (identity) activation function in the hidden layers of a neural network: a composition of linear layers is itself linear, so the whole network computes the same function as a single layer with the output activation, and the hidden layers add nothing.
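A small NumPy sketch (with hypothetical layer sizes and random weights) makes this concrete: two stacked layers with linear activation compute exactly the same function as one equivalent linear layer.
import numpy as np
rng = np.random.default_rng(0)
# two "hidden" layers whose activation is the identity g(z) = z
W1, b1 = rng.standard_normal((25, 4)), rng.standard_normal(25)
W2, b2 = rng.standard_normal((15, 25)), rng.standard_normal(15)
x = rng.standard_normal(4)
a1 = W1 @ x + b1                          # first linear layer
a2 = W2 @ a1 + b2                         # second linear layer
W_eq, b_eq = W2 @ W1, W2 @ b1 + b2        # fold both layers into one
print(np.allclose(a2, W_eq @ x + b_eq))   # True: same linear function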
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
plt.style.use('./deeplearning.mplstyle')
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LeakyReLU
from tensorflow.keras.activations import linear, relu, sigmoid
%matplotlib widget
from matplotlib.widgets import Slider
from lab_utils_common import dlc   # course lab helpers (local files)
from autils import plt_act_trio
from lab_utils_relu import *
import warnings
warnings.simplefilter(action='ignore', category=UserWarning)
plt_act_trio()   # lab helper: plot the activation functions
So by combining several ReLU units, each of which switches on at a different point, the network can piece together a non-linear function.
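As a sketch with hypothetical weights and switch-on points, summing a few shifted ReLU units already gives a piecewise-linear (hence non-linear) curve:
import numpy as np
import matplotlib.pyplot as plt
def relu(z):
    return np.maximum(0, z)
x = np.linspace(0, 3, 300)
# each unit turns on at a different point, so the sum bends there
y = 1.0 * relu(x) - 2.0 * relu(x - 1.0) + 3.0 * relu(x - 2.0)
plt.plot(x, y)
plt.xlabel('x')
plt.ylabel('sum of ReLU units')
plt.show()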
