Stores a grain.autograd.BackProp object in the variables returned by the forward function.
Type-erased version of the backward function, used by the grain.autograd.BackProp object.
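Taken together, these two hooks are what make untyped backpropagation possible: forward stores a closure whose delegate type no longer mentions the concrete function. The following self-contained sketch illustrates the idea only; Var, Scale, UntypedGrad, and this applyForward signature are hypothetical stand-ins, not grain's actual API.

---
import std.algorithm : map;
import std.array : array;

// Type-erased gradient payload; a real framework would use an untyped Variable.
alias UntypedGrad = float[];

// What the graph stores: one uniform entry point into any function's backward.
struct BackProp
{
    UntypedGrad[] delegate(UntypedGrad gy) backward;
}

// A toy variable that can carry the BackProp object created during forward.
struct Var
{
    float[] data;
    BackProp* bprop;
}

// Runs the typed forward, then stores a closure over the typed backward in the
// output; the closure erases F, so callers only see the delegate signature.
Var applyForward(F)(F func, Var x)
{
    auto y = Var(func.forward(x.data));
    y.bprop = new BackProp((UntypedGrad gy) => [func.backward(gy)]);
    return y;
}

// A trivial differentiable function: y = s * x, so dy/dx = s.
struct Scale
{
    float s;
    float[] forward(float[] x) { return x.map!(a => a * s).array; }
    float[] backward(float[] gy) { return gy.map!(a => a * s).array; }
}

void main()
{
    auto y = applyForward(Scale(2f), Var([1f, 2f, 3f]));
    assert(y.data == [2f, 4f, 6f]);
    // the graph walker needs no knowledge of Scale to propagate gradients
    auto gx = y.bprop.backward([1f, 1f, 1f]);
    assert(gx[0] == [2f, 2f, 2f]);
}
---

Once the concrete function type is erased behind a delegate, a backward graph can hold heterogeneous functions in one collection and invoke them uniformly.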
---
import std.typecons;
import grain.testing;
import numir;
import mir.ndslice;

AddBias!float func;

// host (CPU) forward: the bias is added to every row
auto hx = [[0f, 1f], [2f, 3f], [4f, 5f]].variable; // 3x2
auto hb = [-1f, 1f].variable; // 2
auto hy = func.forward(hx, hb);
assert(hy.sliced == [[-1f, 2f], [1f, 4f], [3f, 6f]]);

// host backward: gx passes gy through, gb is the column-wise sum of gy
auto hgy = uniform!float(hy.shape.castArray!size_t).slice.variable;
auto hgxb = func.backward(hgy);
assert(hgxb[0].sliced == hgy.sliced);
assert(hgxb[1].sliced == [hgy.sliced[0, 0] + hgy.sliced[1, 0] + hgy.sliced[2, 0],
                          hgy.sliced[0, 1] + hgy.sliced[1, 1] + hgy.sliced[2, 1]]);
gradCheck(func, tuple(hx, hb), hgy); // numerical gradient check

// CUDA path: must produce the same results as the host implementation
version (grain_cuda) {
    auto dx = hx.to!DeviceStorage;
    auto db = hb.to!DeviceStorage;
    auto dy = func.forward(dx, db);
    assert(dy.to!HostStorage.sliced == [[-1f, 2f], [1f, 4f], [3f, 6f]]);
    auto dgy = hgy.to!DeviceStorage;
    auto dgxb = func.backward(dgy);
    assert(dgxb[0].to!HostStorage.sliced == hgxb[0].sliced);
    assert(dgxb[1].to!HostStorage.sliced == hgxb[1].sliced);
}
---
Adds a bias vector to each row of a matrix; used inside grain.chain.Linear. TODO: generalize to broadcastable addition (use cudnnAddTensor).
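For reference, the arithmetic the unit test above asserts is simple: forward broadcasts b across the rows of x (y[i, j] = x[i, j] + b[j]); backward passes gy through unchanged as gx and reduces gy column-wise for gb (gb[j] = sum over i of gy[i, j]). Below is a minimal sketch on plain D arrays; addBiasRef and biasGrad are hypothetical reference helpers, independent of grain's Variable machinery.

---
// forward: y[i][j] = x[i][j] + b[j] (bias broadcast across rows)
float[][] addBiasRef(const float[][] x, const float[] b)
{
    auto y = new float[][](x.length, b.length);
    foreach (i, row; x)
        foreach (j, v; row)
            y[i][j] = v + b[j];
    return y;
}

// backward: gx = gy unchanged; gb[j] = sum of gy[i][j] over all rows i
float[] biasGrad(const float[][] gy)
{
    auto gb = new float[](gy[0].length);
    gb[] = 0;
    foreach (row; gy)
        foreach (j, v; row)
            gb[j] += v;
    return gb;
}

unittest
{
    auto x = [[0f, 1f], [2f, 3f], [4f, 5f]];
    auto b = [-1f, 1f];
    assert(addBiasRef(x, b) == [[-1f, 2f], [1f, 4f], [3f, 6f]]);
    assert(biasGrad([[1f, 2f], [3f, 4f], [5f, 6f]]) == [9f, 12f]);
}
---

These values match the host asserts in the example above; the grain_cuda branch checks that the device kernels reach the same results.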