AddBias

Add a bias vector to each row of a matrix. This is the bias-addition step of an affine layer and is used inside grain.chain.Linear. TODO: generalize to broadcastable addition (use cudnnAddTensor)
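In other words, it computes y[i, j] = a[i, j] + b[j], broadcasting b over the rows of a. A minimal sketch with plain D arrays (addBiasNaive is a hypothetical name for illustration, not part of grain):

// Hypothetical sketch of the forward math, not the grain implementation:
// y[i][j] = a[i][j] + b[j], i.e. the bias b is broadcast over the rows of a.
float[][] addBiasNaive(const float[][] a, const float[] b)
{
    auto y = new float[][](a.length, b.length);
    foreach (i, row; a)
        y[i][] = row[] + b[];
    return y;
}

With the inputs used in the Examples below this yields [[-1f, 2f], [1f, 4f], [3f, 6f]], matching the asserted output.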

Members

Functions

backward
auto backward(Variable!(T, 2, HostStorage) gy)
Undocumented in source. From the Examples below: returns (gx, gb), where gx == gy and gb is the column-wise sum of gy (see also the gradient sketch after this function list).
backward
auto backward(Variable!(T, 2, DeviceStorage) gy)
Undocumented in source. Device (CUDA) overload of backward; computes the same gradients on DeviceStorage variables.
forward
auto forward(Variable!(T, 2, HostStorage) a, Variable!(T, 1, HostStorage) b)
Undocumented in source. From the Examples below: returns a + b, adding the bias vector b to each row of the matrix a.
forward
auto forward(Variable!(T, 2, DeviceStorage) a, Variable!(T, 1, DeviceStorage) b)
Undocumented in source. Device (CUDA) overload of forward for DeviceStorage variables.
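For reference, the gradient math that both backward overloads implement, sketched with plain D arrays. biasGradNaive is a hypothetical name for illustration; the real implementation works on mir slices (host) and CUDA buffers (device), not nested arrays.

// Hypothetical sketch of the backward math, not the grain implementation.
// The gradient w.r.t. the matrix input is gy unchanged (addition passes
// gradients through), so only the bias gradient needs work:
// gb[j] = sum over i of gy[i][j], because b was broadcast over the rows.
float[] biasGradNaive(const float[][] gy)
{
    auto gb = new float[](gy[0].length);
    gb[] = 0;
    foreach (row; gy)
        gb[] += row[];  // accumulate column-wise sums
    return gb;
}

These are exactly the values the assertions on hgxb in the Examples check.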

Mixins

__anonymous
mixin FunctionCommon
Undocumented in source.

Mixed In Members

From mixin FunctionCommon

this(this)
this(this)
Undocumented in source.
DeviceRets
alias DeviceRets = Tuple!(Parameters!backward)
Undocumented in source.
DeviceArgs
alias DeviceArgs = Tuple!(Parameters!forward)
Undocumented in source.
__anonymous
mixin TypeChecker!(forward, backward)
Undocumented in source.
_mixin_dargs
DeviceArgs _mixin_dargs;
Undocumented in source.
HostRets
alias HostRets = Tuple!(Parameters!backward)
Undocumented in source.
HostArgs
alias HostArgs = Tuple!(Parameters!forward)
Undocumented in source.
__anonymous
mixin TypeChecker!(forward, backward)
Undocumented in source.
_mixin_hargs
HostArgs _mixin_hargs;
Undocumented in source.
applyForward
auto applyForward(Args args)

runs the forward computation and stores a grain.autograd.BackProp object in the variables returned from the forward function, so that backpropagation can later route gradients back to this function (see the sketch below)

applyBackward
void applyBackward(UntypedVariable[] ugradOutputs)

type-erased version of the backward function, invoked through the grain.autograd.BackProp object during backpropagation (see the sketch below)
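A hedged sketch of how these two entry points fit together. It assumes the variable(requiresGrad), UntypedVariable, and Variable.backward conventions from grain.autograd; treat the exact call pattern as illustrative rather than verified usage:

AddBias!float func;
auto x = [[0f, 1f], [2f, 3f]].variable(true); // requiresGrad = true
auto b = [-1f, 1f].variable(true);
auto y = func.applyForward(x, b);  // runs forward, records BackProp in y
// later, backpropagation reaches func.applyBackward with type-erased
// (UntypedVariable) gradients:
auto ugy = UntypedVariable([[1f, 1f], [1f, 1f]].variable);
y.backward(&ugy);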

Examples

import std.typecons;
import grain.testing;
import numir;
import mir.ndslice;

AddBias!float func;
auto hx = [[0f, 1f], [2f, 3f], [4f, 5f]].variable; // 3x2
auto hb = [-1f, 1f].variable; // 2
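// forward: hy[i, j] = hx[i, j] + hb[j]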
auto hy = func.forward(hx, hb);
assert(hy.sliced == [[-1f, 2f], [1f, 4f], [3f, 6f]]);

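// backward: gx is gy passed through; gb is the column-wise sum of gy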
auto hgy = uniform!float(hy.shape.castArray!size_t).slice.variable;
auto hgxb = func.backward(hgy);
assert(hgxb[0].sliced == hgy.sliced);
assert(hgxb[1].sliced == [hgy.sliced[0, 0] + hgy.sliced[1, 0] + hgy.sliced[2, 0],
                          hgy.sliced[0, 1] + hgy.sliced[1, 1] + hgy.sliced[2, 1]]);
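// numerically verify the analytic gradients (grain.testing.gradCheck)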
gradCheck(func, tuple(hx, hb), hgy);

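// the same computation through the CUDA (DeviceStorage) path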
version (grain_cuda) {
    auto dx = hx.to!DeviceStorage;
    auto db = hb.to!DeviceStorage;
    auto dy = func.forward(dx, db);
    assert(dy.to!HostStorage.sliced == [[-1f, 2f], [1f, 4f], [3f, 6f]]);
    auto dgy = hgy.to!DeviceStorage;
    auto dgxb = func.backward(dgy);
    assert(dgxb[0].to!HostStorage.sliced == hgxb[0].sliced);
    assert(dgxb[1].to!HostStorage.sliced == hgxb[1].sliced);
}

Meta