cuDNN high-level wrapper for grain.autograd.Variable
TODO: support a global workspace instead of frequent allocation

- activationBackward
void activationBackward(Variable!(T, dim, DeviceStorage) gx, Variable!(T, dim, DeviceStorage) gy, Variable!(T, dim, DeviceStorage) x, Variable!(T, dim, DeviceStorage) y, T alpha, T beta, double coeff)
wrapper of the cuDNN gradient function for activations such as sigmoid and tanh (see the sketch after activationForward)
- activationForward
void activationForward(Variable!(T, dim, DeviceStorage) x, Variable!(T, dim, DeviceStorage) y, T alpha, T beta, double coeff)
y = alpha * f(x) + beta * y
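A minimal sketch of the forward/backward pair. It assumes grain.autograd provides `variable`, `to!DeviceStorage`, and an `uninitVariable` allocator, and that the activation mode is a compile-time argument; the CUDNN_* values are standard cuDNN enums from the binding.

```d
import grain.autograd; // variable, to!DeviceStorage, uninitVariable (assumed helpers)
import grain.cudnn;    // plus the cuDNN binding for the CUDNN_* enums

void example() {
    // hypothetical 2x2 input copied to the GPU
    auto x = [[-1f, 1f], [2f, -2f]].variable.to!DeviceStorage;
    auto y = uninitVariable!(float, DeviceStorage)([2, 2]);
    // y = 1 * tanh(x) + 0 * y; the mode is assumed to be a template argument
    activationForward!CUDNN_ACTIVATION_TANH(x, y, 1f, 0f, 0.0);

    // backward: gx = gy * d tanh/dx, reconstructed from (x, y)
    auto gy = uninitVariable!(float, DeviceStorage)([2, 2]);
    gy.fill(1f); // upstream gradient of ones (fill is documented below)
    auto gx = uninitVariable!(float, DeviceStorage)([2, 2]);
    activationBackward!CUDNN_ACTIVATION_TANH(gx, gy, x, y, 1f, 0f, 0.0);
}
```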
- contiguous
auto contiguous(Variable!(T, dim, DeviceStorage) x)
undocumented in source; presumably returns a contiguous copy of x (compare isContiguous below)
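A sketch pairing it with isContiguous; the `variable` and `to!DeviceStorage` helpers are assumed from grain.autograd.

```d
import grain.autograd;
import grain.cudnn;

void example() {
    auto x = [[1f, 2f], [3f, 4f]].variable.to!DeviceStorage;
    assert(x.isContiguous); // freshly created variables are densely packed
    auto y = x.contiguous;  // a no-op-like copy here; compacts strided data otherwise
}
```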
- convBackward
void convBackward(Variable!(T, dim, DeviceStorage) gradInput, Variable!(T, dim, DeviceStorage) input, Variable!(T, dim, DeviceStorage) gradFilter, Variable!(T, dim, DeviceStorage) filter, Variable!(T, dim, DeviceStorage) gradOutput, int[imDims] stride, int[imDims] pad, int[imDims] dilation, int ngroup, cudnnConvolutionBwdDataAlgo_t algo, float alpha, float beta)
wrapper of cudnnConvolutionBackwardData and cudnnConvolutionBackwardFilter for Variable
- convForward
void convForward(Variable!(T, dim, DeviceStorage) input, Variable!(T, dim, DeviceStorage) filter, Variable!(T, dim, DeviceStorage) output, int[imDims] stride, int[imDims] pad, int[imDims] dilation, int ngroup, cudnnConvolutionFwdAlgo_t algo, float alpha, float beta)
wrapper of cudnnConvolutionForward for Variable
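A sketch of a single-group 2-D convolution (dim = 4, imDims = 2); `uninitVariable` is an assumed grain.autograd allocator, while the algorithm enum is a standard cuDNN value. convBackward is called analogously with the gradient variables.

```d
import grain.autograd;
import grain.cudnn;

void example() {
    // NCHW input (1, 1, 5, 5) and a (1, 1, 3, 3) filter
    auto input  = uninitVariable!(float, DeviceStorage)([1, 1, 5, 5]);
    auto filter = uninitVariable!(float, DeviceStorage)([1, 1, 3, 3]);
    // pad 1 keeps the spatial size at 5x5
    auto output = uninitVariable!(float, DeviceStorage)([1, 1, 5, 5]);
    int[2] stride = [1, 1], pad = [1, 1], dilation = [1, 1];
    convForward(input, filter, output, stride, pad, dilation, 1,
                CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, 1f, 0f);
}
```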
- cudnnDataType
auto cudnnDataType()
convert floating point types (float, double) into the corresponding cuDNN enum (CUDNN_DATA_FLOAT / CUDNN_DATA_DOUBLE)
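Presumably a template over the element type; a compile-time sketch under that assumption.

```d
import grain.cudnn;

// the CUDNN_DATA_* values are standard cuDNN enums
static assert(cudnnDataType!float == CUDNN_DATA_FLOAT);
static assert(cudnnDataType!double == CUDNN_DATA_DOUBLE);
```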
- fill
void fill(Variable!(T, dim, DeviceStorage) x, T value)
x[] = value (WARNING: not tested)
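Given the warning, a sketch only; `uninitVariable` is an assumed grain.autograd allocator.

```d
import grain.autograd;
import grain.cudnn;

void example() {
    auto x = uninitVariable!(float, DeviceStorage)([2, 3]);
    x.fill(3.14f); // every element becomes 3.14 (untested per the note above)
}
```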
- isContiguous
bool isContiguous(Variable!(T, dim, Storage) x)
return true if x's data is laid out contiguously (no stride gaps)
- isDeterministic
auto isDeterministic()
return global cudnn option (presumably whether deterministic algorithms are requested)
- isNanProp
auto isNanProp()
return global cudnn option (presumably whether NaN propagation, CUDNN_PROPAGATE_NAN, is enabled)
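A trivial probe of both global options, using only functions from this listing.

```d
import std.stdio : writeln;
import grain.cudnn;

void example() {
    writeln("deterministic algorithms requested: ", isDeterministic());
    writeln("NaN propagation enabled: ", isNanProp());
}
```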
- makeCudnnTensor
auto makeCudnnTensor(Variable!(T, dim, DeviceStorage) x)
convert variable to cudnn tensor descriptor object
- makeCudnnTensor
auto makeCudnnTensor(T storage)
convert contiguous cuda storage to a 1-D tensor descriptor
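A sketch of both overloads; that the returned object owns the descriptor and that the raw storage is reachable as `x.data` are assumptions, as is `uninitVariable`.

```d
import grain.autograd;
import grain.cudnn;

void example() {
    auto x = uninitVariable!(float, DeviceStorage)([2, 3]);
    auto tx = x.makeCudnnTensor;       // descriptor describing the 2-D variable
    auto ts = makeCudnnTensor(x.data); // 1-D descriptor over raw storage (field name assumed)
}
```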
- poolBackward
void poolBackward(Variable!(T, _tensorDims, DeviceStorage) gradInput, Variable!(T, _tensorDims, DeviceStorage) input, Variable!(T, _tensorDims, DeviceStorage) gradOutput, Variable!(T, _tensorDims, DeviceStorage) output, int[_poolDims] windowDim, int[_poolDims] padding, int[_poolDims] stride, T alpha, T beta)
wrapper of cudnnPoolingBackward for Variable
- poolForward
auto poolForward(Variable!(T, _tensorDims, DeviceStorage) input, int[_poolDims] windowDim, int[_poolDims] padding, int[_poolDims] stride, T alpha, T beta)
wrapper of cudnnPoolingForward for Variable
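A sketch of 2x2 max pooling with stride 2 (_tensorDims = 4, _poolDims = 2); `uninitVariable` and the pooling mode being a compile-time argument are both assumptions.

```d
import grain.autograd;
import grain.cudnn;

void example() {
    auto input = uninitVariable!(float, DeviceStorage)([1, 1, 4, 4]);
    int[2] window = [2, 2], pad = [0, 0], stride = [2, 2];
    // returns the freshly allocated (1, 1, 2, 2) output
    auto output = poolForward!CUDNN_POOLING_MAX(input, window, pad, stride, 1f, 0f);

    // backward mirrors forward, writing into a pre-allocated gradInput
    auto gradOutput = uninitVariable!(float, DeviceStorage)([1, 1, 2, 2]);
    auto gradInput  = uninitVariable!(float, DeviceStorage)([1, 1, 4, 4]);
    poolBackward!CUDNN_POOLING_MAX(gradInput, input, gradOutput, output,
                                   window, pad, stride, 1f, 0f);
}
```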
- reduce
void reduce(Variable!(T, dim, DeviceStorage) src, Variable!(T, dim, DeviceStorage) dst, T alpha, T beta)
Tensor operation : C = reduce op( alpha * A ) + beta * C
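cuDNN reduces every axis where dst has extent 1; a sketch assuming the reduce op is a compile-time argument and that `uninitVariable` exists.

```d
import grain.autograd;
import grain.cudnn;

void example() {
    auto src = uninitVariable!(float, DeviceStorage)([2, 3]);
    auto dst = uninitVariable!(float, DeviceStorage)([2, 1]); // reduce the second axis
    // dst = max(1 * src) + 0 * dst
    reduce!CUDNN_REDUCE_TENSOR_MAX(src, dst, 1f, 0f);
}
```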
- scale
void scale(Variable!(T, dim, DeviceStorage) x, T alpha)
x[] *= alpha (in-place scaling)
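A one-line sketch (`uninitVariable` assumed).

```d
import grain.autograd;
import grain.cudnn;

void example() {
    auto x = uninitVariable!(float, DeviceStorage)([2, 2]);
    x.scale(0.5f); // halve every element in place
}
```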
- softmaxBackward
void softmaxBackward(Variable!(T, dim, DeviceStorage) gx, Variable!(T, dim, DeviceStorage) gy, Variable!(T, dim, DeviceStorage) y, T alpha, T beta)
wrapper of cudnnSoftmaxBackward for Variable (see the sketch after softmaxForward)
- softmaxForward
void softmaxForward(Variable!(T, dim, DeviceStorage) x, Variable!(T, dim, DeviceStorage) y, T alpha, T beta)
compute the softmax over all C for each H, W, N
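A sketch over an (N, C) batch; per the description, each row of y sums to 1 (`uninitVariable` assumed).

```d
import grain.autograd;
import grain.cudnn;

void example() {
    auto x = uninitVariable!(float, DeviceStorage)([2, 3]); // (N, C)
    auto y = uninitVariable!(float, DeviceStorage)([2, 3]);
    softmaxForward(x, y, 1f, 0f); // y[n] = softmax(x[n]) over C

    // backward: gx is recovered from the output y and its upstream gradient gy
    auto gy = uninitVariable!(float, DeviceStorage)([2, 3]);
    auto gx = uninitVariable!(float, DeviceStorage)([2, 3]);
    softmaxBackward(gx, gy, y, 1f, 0f);
}
```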
- tensorOp
void tensorOp(Variable!(T, dim, DeviceStorage) c, Variable!(T, dim, DeviceStorage) a, Variable!(T, dim, DeviceStorage) b, T alpha1, T alpha2, T beta)
Tensor operation : C = op( alpha1 * A, alpha2 * B ) + beta * C
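A sketch of elementwise addition, assuming the binary op (e.g. CUDNN_OP_TENSOR_ADD) is a compile-time argument and that `uninitVariable` exists.

```d
import grain.autograd;
import grain.cudnn;

void example() {
    auto a = uninitVariable!(float, DeviceStorage)([2, 2]);
    auto b = uninitVariable!(float, DeviceStorage)([2, 2]);
    auto c = uninitVariable!(float, DeviceStorage)([2, 2]);
    // c = (1 * a) + (1 * b) + 0 * c
    tensorOp!CUDNN_OP_TENSOR_ADD(c, a, b, 1f, 1f, 0f);
}
```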
- transform
void transform(Variable!(T, dim, DeviceStorage) src, Variable!(T, dim, DeviceStorage) dst, T alpha, T beta)
copy src to dst with broadcasting
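A sketch broadcasting a (1, 3) row into a (2, 3) destination, per the description above (`uninitVariable` assumed).

```d
import grain.autograd;
import grain.cudnn;

void example() {
    auto src = uninitVariable!(float, DeviceStorage)([1, 3]);
    auto dst = uninitVariable!(float, DeviceStorage)([2, 3]);
    transform(src, dst, 1f, 0f); // dst = src repeated along the first axis
}
```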