def conv_forward_naive(x, w, b, conv_param):
"""
A naive implementation of the forward pass for a convolutional layer.
The input consists of N data points, each with C channels, height H and
width W. We convolve each input with F different filters, where each filter
spans all C channels and has height HH and width WW.
Input:
- x: Input data of shape (N, C, H, W)
- w: Filter weights of shape (F, C, HH, WW)
- b: Biases, of shape (F,)
- conv_param: A dictionary with the following keys:
- 'stride': The number of pixels between adjacent receptive fields in the
horizontal and vertical directions.
- 'pad': The number of pixels that will be used to zero-pad the input.
    During padding, 'pad' zeros should be placed symmetrically (i.e., equally
    on both sides) along the height and width axes of the input. Be careful
    not to modify the original input x directly.
Returns a tuple of:
- out: Output data, of shape (N, F, H', W') where H' and W' are given by
H' = 1 + (H + 2 * pad - HH) / stride
W' = 1 + (W + 2 * pad - WW) / stride
- cache: (x, w, b, conv_param)
"""
    XN, XC, XH, XW = x.shape
    WF, WC, WH, WW = w.shape
    stride, pad = conv_param['stride'], conv_param['pad']

    # Zero-pad the input symmetrically along the spatial axes; np.pad copies,
    # so the original x is left untouched.
    data = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode='constant')

    # Output spatial dimensions.
    SH = 1 + (XH + 2 * pad - WH) // stride
    SW = 1 + (XW + 2 * pad - WW) // stride

    out = np.zeros((XN, WF, SH, SW))
    for IN in range(XN):          # each image
        for IF in range(WF):      # each filter
            for IH in range(SH):  # each output row
                for IW in range(SW):  # each output column
                    window = data[IN, :, IH * stride:IH * stride + WH,
                                  IW * stride:IW * stride + WW]
                    out[IN, IF, IH, IW] = np.sum(window * w[IF]) + b[IF]
    cache = (x, w, b, conv_param)
    return out, cache
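def _demo_conv_forward_naive():
    # Hedged usage sketch (not part of the assignment API): checks that the
    # output spatial size matches H' = 1 + (H + 2*pad - HH) // stride.
    # Assumes numpy is imported as np at the top of this module; the shapes
    # below are illustrative, not required.
    x = np.random.randn(2, 3, 8, 8)
    w = np.random.randn(4, 3, 3, 3)
    b = np.zeros(4)
    out, _ = conv_forward_naive(x, w, b, {'stride': 1, 'pad': 1})
    assert out.shape == (2, 4, 8, 8)  # H' = 1 + (8 + 2 - 3) // 1 = 8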
def conv_backward_naive(dout, cache):
"""
A naive implementation of the backward pass for a convolutional layer.
Inputs:
- dout: Upstream derivatives.
- cache: A tuple of (x, w, b, conv_param) as in conv_forward_naive
Returns a tuple of:
- dx: Gradient with respect to x
- dw: Gradient with respect to w
- db: Gradient with respect to b
"""
    x, w, b, conv_param = cache
    XN, XC, XH, XW = x.shape
    WF, WC, WH, WW = w.shape
    pad, stride = conv_param['pad'], conv_param['stride']

    # Recreate the zero-padded input used by the forward pass.
    data = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode='constant')

    # Output spatial dimensions, matching the forward pass.
    SH = 1 + (XH + 2 * pad - WH) // stride
    SW = 1 + (XW + 2 * pad - WW) // stride

    # dx: each filter application scatters w * dout back onto the input
    # window it was applied to; accumulate in padded coordinates, then crop.
    dx_padded = np.zeros(data.shape)
    for IN in range(XN):
        for IF in range(WF):
            for IH in range(SH):
                for IW in range(SW):
                    dx_padded[IN, :, IH * stride:IH * stride + WH,
                              IW * stride:IW * stride + WW] += \
                        w[IF] * dout[IN, IF, IH, IW]
    dx = dx_padded[:, :, pad:pad + XH, pad:pad + XW]

    # dw: each filter accumulates its input windows weighted by dout.
    # db: each bias accumulates dout over all images and positions.
    dw = np.zeros(w.shape)
    db = np.zeros(b.shape)
    for IF in range(WF):
        for IH in range(SH):
            for IW in range(SW):
                window = data[:, :, IH * stride:IH * stride + WH,
                              IW * stride:IW * stride + WW]
                dw[IF] += np.sum(window * dout[:, IF, IH, IW].reshape(XN, 1, 1, 1),
                                 axis=0)
                db[IF] += np.sum(dout[:, IF, IH, IW])
    return dx, dw, db
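def _demo_conv_backward_naive():
    # Hedged sketch (not part of the assignment API): a tiny central-difference
    # check of db against the analytic gradient. Shapes, seed, and tolerance
    # are illustrative assumptions; assumes numpy is imported as np.
    np.random.seed(0)
    x = np.random.randn(2, 3, 5, 5)
    w = np.random.randn(2, 3, 3, 3)
    b = np.random.randn(2)
    conv_param = {'stride': 1, 'pad': 1}
    out, cache = conv_forward_naive(x, w, b, conv_param)
    dout = np.random.randn(*out.shape)
    _, _, db = conv_backward_naive(dout, cache)
    h = 1e-5
    db_num = np.zeros_like(b)
    for i in range(b.size):
        bp, bm = b.copy(), b.copy()
        bp[i] += h
        bm[i] -= h
        fp, _ = conv_forward_naive(x, w, bp, conv_param)
        fm, _ = conv_forward_naive(x, w, bm, conv_param)
        db_num[i] = np.sum((fp - fm) * dout) / (2 * h)
    assert np.allclose(db, db_num, atol=1e-6)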
def max_pool_forward_naive(x, pool_param):
"""
A naive implementation of the forward pass for a max-pooling layer.
Inputs:
- x: Input data, of shape (N, C, H, W)
- pool_param: dictionary with the following keys:
- 'pool_height': The height of each pooling region
- 'pool_width': The width of each pooling region
- 'stride': The distance between adjacent pooling regions
    No padding is necessary here.
Returns a tuple of:
- out: Output data, of shape (N, C, H', W') where H' and W' are given by
H' = 1 + (H - pool_height) / stride
W' = 1 + (W - pool_width) / stride
- cache: (x, pool_param)
"""
    XN, XC, XH, XW = x.shape
    WH = pool_param['pool_height']
    WW = pool_param['pool_width']
    stride = pool_param['stride']

    # Output spatial dimensions.
    SH, SW = 1 + (XH - WH) // stride, 1 + (XW - WW) // stride

    out = np.zeros((XN, XC, SH, SW))
    for IN in range(XN):
        for IC in range(XC):
            for IH in range(SH):
                for IW in range(SW):
                    # Each output entry is the max over its pooling window.
                    out[IN, IC, IH, IW] = np.max(
                        x[IN, IC, IH * stride:IH * stride + WH,
                          IW * stride:IW * stride + WW])
    cache = (x, pool_param)
    return out, cache
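def _demo_max_pool_forward_naive():
    # Hedged sketch: 2x2 pooling with stride 2 over one 4x4 map; values are
    # illustrative only. Assumes numpy is imported as np.
    x = np.arange(16, dtype=float).reshape(1, 1, 4, 4)
    out, _ = max_pool_forward_naive(
        x, {'pool_height': 2, 'pool_width': 2, 'stride': 2})
    # Each 2x2 window keeps its maximum: [[5, 7], [13, 15]].
    assert np.array_equal(out, np.array([[[[5., 7.], [13., 15.]]]]))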
def max_pool_backward_naive(dout, cache):
"""
A naive implementation of the backward pass for a max-pooling layer.
Inputs:
- dout: Upstream derivatives
- cache: A tuple of (x, pool_param) as in the forward pass.
Returns:
- dx: Gradient with respect to x
"""
    x, pool_param = cache
    XN, XC, XH, XW = x.shape
    WH = pool_param['pool_height']
    WW = pool_param['pool_width']
    stride = pool_param['stride']
    SH, SW = 1 + (XH - WH) // stride, 1 + (XW - WW) // stride

    dx = np.zeros(x.shape)
    for IN in range(XN):
        for IC in range(XC):
            for IH in range(SH):
                for IW in range(SW):
                    # Route the upstream gradient to the argmax of each
                    # window (tied maxima all receive the gradient).
                    window = x[IN, IC, IH * stride:IH * stride + WH,
                               IW * stride:IW * stride + WW]
                    dx[IN, IC, IH * stride:IH * stride + WH,
                       IW * stride:IW * stride + WW] += \
                        dout[IN, IC, IH, IW] * (window == window.max())
    return dx
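def _demo_max_pool_backward_naive():
    # Hedged sketch: the upstream gradient flows only to each window's argmax.
    x = np.arange(16, dtype=float).reshape(1, 1, 4, 4)
    pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
    _, cache = max_pool_forward_naive(x, pool_param)
    dx = max_pool_backward_naive(np.ones((1, 1, 2, 2)), cache)
    # Only the window maxima (values 5, 7, 13, 15) receive gradient.
    assert dx.sum() == 4 and dx[0, 0, 1, 1] == 1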
def spatial_batchnorm_forward(x, gamma, beta, bn_param):
"""
Computes the forward pass for spatial batch normalization.
Inputs:
- x: Input data of shape (N, C, H, W)
- gamma: Scale parameter, of shape (C,)
- beta: Shift parameter, of shape (C,)
- bn_param: Dictionary with the following keys:
- mode: 'train' or 'test'; required
- eps: Constant for numeric stability
- momentum: Constant for running mean / variance. momentum=0 means that
old information is discarded completely at every time step, while
momentum=1 means that new information is never incorporated. The
default of momentum=0.9 should work well in most situations.
    - running_mean: Array of shape (C,) giving running mean of features
    - running_var: Array of shape (C,) giving running variance of features
Returns a tuple of:
- out: Output data, of shape (N, C, H, W)
- cache: Values needed for the backward pass
"""
    N, C, H, W = x.shape
    # Fold the spatial dimensions into the batch dimension so each channel is
    # normalized over N*H*W values, then reuse vanilla batchnorm.
    x_flat = x.transpose(0, 2, 3, 1).reshape(-1, C)
    out, cache = batchnorm_forward(x_flat, gamma, beta, bn_param)
    out = out.reshape(N, H, W, C).transpose(0, 3, 1, 2)
    return out, cache
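def _demo_spatial_batchnorm_forward():
    # Hedged sketch: in train mode each channel should come out with roughly
    # zero mean and unit variance over (N, H, W). Assumes the vanilla
    # batchnorm_forward defined elsewhere in this module supplies default
    # eps/running statistics when bn_param only contains 'mode'.
    np.random.seed(0)
    x = 4 * np.random.randn(3, 2, 5, 5) + 10
    out, _ = spatial_batchnorm_forward(x, np.ones(2), np.zeros(2),
                                       {'mode': 'train'})
    assert np.allclose(out.mean(axis=(0, 2, 3)), 0, atol=1e-7)
    assert np.allclose(out.std(axis=(0, 2, 3)), 1, atol=1e-4)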
def spatial_batchnorm_backward(dout, cache):
"""
Computes the backward pass for spatial batch normalization.
Inputs:
- dout: Upstream derivatives, of shape (N, C, H, W)
- cache: Values from the forward pass
Returns a tuple of:
- dx: Gradient with respect to inputs, of shape (N, C, H, W)
- dgamma: Gradient with respect to scale parameter, of shape (C,)
- dbeta: Gradient with respect to shift parameter, of shape (C,)
"""
    N, C, H, W = dout.shape
    # Flatten the spatial dimensions into the batch dimension, mirroring the
    # forward pass, then reuse the vanilla batchnorm backward pass.
    dout_flat = dout.transpose(0, 2, 3, 1).reshape(-1, C)
    dx, dgamma, dbeta = batchnorm_backward(dout_flat, cache)
    dx = dx.reshape(N, H, W, C).transpose(0, 3, 1, 2)
    return dx, dgamma, dbeta
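def _demo_spatial_batchnorm_backward():
    # Hedged sketch: gradients keep the input layout and the per-channel
    # parameter shape. Same assumptions about the vanilla batchnorm helpers
    # as in the forward demo above.
    np.random.seed(0)
    x = np.random.randn(3, 2, 4, 4)
    _, cache = spatial_batchnorm_forward(x, np.ones(2), np.zeros(2),
                                         {'mode': 'train'})
    dout = np.random.randn(3, 2, 4, 4)
    dx, dgamma, dbeta = spatial_batchnorm_backward(dout, cache)
    assert dx.shape == x.shape
    assert dgamma.shape == (2,) and dbeta.shape == (2,)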
def spatial_groupnorm_forward(x, gamma, beta, G, gn_param):
"""
Computes the forward pass for spatial group normalization.
    In contrast to layer normalization, group normalization splits the channels
    of each datapoint into G contiguous groups, which it then normalizes
    independently. Per-channel shifting and scaling are then applied to the
    data, in a manner identical to batch normalization and layer normalization.
Inputs:
- x: Input data of shape (N, C, H, W)
    - gamma: Scale parameter, of shape (1, C, 1, 1)
    - beta: Shift parameter, of shape (1, C, 1, 1)
- G: Integer number of groups to split into, should be a divisor of C
- gn_param: Dictionary with the following keys:
- eps: Constant for numeric stability
Returns a tuple of:
- out: Output data, of shape (N, C, H, W)
- cache: Values needed for the backward pass
"""
    eps = gn_param.get('eps', 1e-5)
    N, C, H, W = x.shape

    # Group the channels: each row of x_in holds the (C // G) * H * W values
    # of one (sample, group) pair; transpose so groups sit along columns.
    x_in = x.reshape(N * G, -1).T  # shape (F, N*G)

    # Per-group mean and (biased) variance, as in vanilla batchnorm.
    mu = np.mean(x_in, axis=0)
    x_minus_mu = x_in - mu
    var = np.mean(x_minus_mu ** 2, axis=0)
    sqrt_var_invert = 1.0 / np.sqrt(var + eps)
    x_norm = x_minus_mu * sqrt_var_invert

    # Restore the (N, C, H, W) layout and apply the per-channel affine
    # transform.
    out_pre = x_norm.T.reshape(N, C, H, W)
    out = out_pre * gamma + beta
    cache = (x_norm, sqrt_var_invert, gamma, G, out_pre)
    return out, cache
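def _demo_spatial_groupnorm_forward():
    # Hedged sketch: with G groups, each (sample, group) slab of C//G channels
    # is normalized to roughly zero mean and unit variance. Shapes are
    # illustrative assumptions.
    np.random.seed(0)
    N, C, H, W, G = 2, 6, 4, 5, 2
    x = 5 * np.random.randn(N, C, H, W) + 7
    out, _ = spatial_groupnorm_forward(x, np.ones((1, C, 1, 1)),
                                       np.zeros((1, C, 1, 1)), G, {})
    slabs = out.reshape(N * G, -1)
    assert np.allclose(slabs.mean(axis=1), 0, atol=1e-7)
    assert np.allclose(slabs.std(axis=1), 1, atol=1e-3)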
def spatial_groupnorm_backward(dout, cache):
"""
Computes the backward pass for spatial group normalization.
Inputs:
- dout: Upstream derivatives, of shape (N, C, H, W)
- cache: Values from the forward pass
Returns a tuple of:
- dx: Gradient with respect to inputs, of shape (N, C, H, W)
    - dgamma: Gradient with respect to scale parameter, of shape (1, C, 1, 1)
    - dbeta: Gradient with respect to shift parameter, of shape (1, C, 1, 1)
"""
    x_norm, sqrt_var_invert, gamma, G, out_pre = cache
    N, C, H, W = dout.shape
    F = (C // G) * H * W  # number of values in each (sample, group) slab

    # Gradients of the per-channel affine parameters.
    dgamma = np.sum(out_pre * dout, axis=(0, 2, 3), keepdims=True)
    dbeta = np.sum(dout, axis=(0, 2, 3), keepdims=True)

    # Backprop through the affine transform, then move to the (F, N*G)
    # layout used during normalization.
    o = (dout * gamma).reshape(N * G, F).T

    # Standard batchnorm-style backward pass, applied independently to each
    # column (one (sample, group) pair).
    dx = (1.0 / F) * sqrt_var_invert * (
        F * o
        - np.sum(o * x_norm, axis=0, keepdims=True) * x_norm
        - np.sum(o, axis=0, keepdims=True))
    dx = dx.T.reshape(N, C, H, W)
return dx, dgamma, dbeta
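def _demo_spatial_groupnorm_backward():
    # Hedged sketch: dbeta is just dout summed over (N, H, W) per channel,
    # and dx keeps the input layout. Shapes are illustrative assumptions.
    np.random.seed(0)
    N, C, H, W, G = 2, 6, 4, 5, 2
    x = np.random.randn(N, C, H, W)
    _, cache = spatial_groupnorm_forward(x, np.ones((1, C, 1, 1)),
                                         np.zeros((1, C, 1, 1)), G, {})
    dout = np.random.randn(N, C, H, W)
    dx, dgamma, dbeta = spatial_groupnorm_backward(dout, cache)
    assert dx.shape == x.shape
    assert np.allclose(dbeta, dout.sum(axis=(0, 2, 3), keepdims=True))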