Neural nets are universal function approximators. In this post, we take an empirical tour of some of the theorems that address neural nets’ expressiveness.

We will verify two theorems. The first states that a network with a single, wide-enough hidden layer is a universal function approximator (Cybenko, 1989).
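Informally, Cybenko's result says that finite sums of the form

$$g(x) = \sum_{j=1}^{N} \alpha_j \, \sigma\!\left(w_j^{\top} x + b_j\right)$$

with a sigmoidal activation $\sigma$ are dense in the continuous functions on a compact set, i.e. a single hidden layer can approximate any such function arbitrarily well if its width $N$ is allowed to grow. Later work extended this to other activations, including the ReLU we use below.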

The second states that a ResNet with one-neuron hidden layers is a universal approximator (Lin & Jegelka, 2018). In other words, fully connected networks whose width equals the input dimension $n$ are not universal approximators, but with residual connections they are.
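The building block in that construction is, roughly,

$$x \;\mapsto\; x + V\,\mathrm{ReLU}(U x + b), \qquad U \in \mathbb{R}^{1 \times n},\ V \in \mathbb{R}^{n \times 1},$$

i.e. each residual unit has a single hidden ReLU neuron. The ResNetLayer we implement below is a slightly simplified variant: the scalar $\mathrm{ReLU}(Ux + b)$ is broadcast-added to every coordinate of $x$, which amounts to fixing $V$ to a vector of ones.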

We are going to verify both with a pure NumPy implementation, demonstrating them on the classification problem depicted in the figure below: points inside the unit disk are labeled 1, points on the outer ring are labeled 0, and the annulus in between is removed to leave a margin. So let’s first implement some functions (we are going to implement our own autodiff; we could do the same with JAX, PyTorch, or TensorFlow, but this is more fun).
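For reference, the backward passes we implement below follow the standard chain-rule expressions. For a linear layer $o = A x + b$ with upstream gradient $\partial L / \partial o$, the accumulated gradients are

$$\frac{\partial L}{\partial A} = \frac{\partial L}{\partial o}\, x^{\top}, \qquad \frac{\partial L}{\partial b} = \frac{\partial L}{\partial o}, \qquad \frac{\partial L}{\partial x} = A^{\top}\, \frac{\partial L}{\partial o},$$

and a ReLU layer simply zeroes the incoming gradient wherever its activation is inactive.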

import numpy as np
import matplotlib.pyplot as plt
# classification problem
np.random.seed(5)
num_pts = 500
X = np.random.randn(num_pts, 2) * 1.5
R = np.sum(X * X, axis=1)
X = X[np.logical_or(R > 2, R < 1), :]
num_pts = X.shape[0]
Y = (np.sum(X * X, axis=1) < 1).astype(float)
plot(X, Y, label='Ground Truth')  # plot() is defined with the other helpers further below



# implementation of the layers and nets for the experiment
# each class has a forward (evaluation), a backward (computing and accumulating gradients),
# and a step method (updating the parameters)
class LinearLayer(object):
    def __init__(self, m=10, n=2):
        self.m = m
        self.n = n
        self.A = np.random.randn(m, n) * 0.25
        self.b = np.random.randn(m) * 0.25
        self.i = None
        self.o = None
        self.dA = np.zeros(self.A.shape)
        self.db = np.zeros(self.b.shape)
        
    def forward(self, _in):
        self.i = _in
        self.o = np.dot(self.A, _in) + self.b
        return self.o
    
    def backward(self, do):
        # do = dL/do; accumulate dL/dA = do x^T and dL/db = do, return dL/dx = A^T do
        di = np.dot(self.A.T, do)
        self.dA += np.outer(do, self.i)
        self.db += do
        return di
    
    def reset_grad(self):
        self.dA = np.zeros(self.A.shape)
        self.db = np.zeros(self.b.shape)

    def step(self, step=0.01):
        self.A -= step * self.dA
        self.b -= step * self.db
        self.reset_grad()
    

class ReLuLayer(object):
    def __init__(self, m=10, n=2):
        self.m = m
        self.n = n
        self.linear = LinearLayer(m=m, n=n)
        self.i = None
        self.o = None
        self.mask = None
        
    def forward(self, _in):
        self.i = _in
        z = self.linear.forward(_in)
        self.mask = z < 0                      # remember where the ReLU is inactive
        self.o = np.where(self.mask, 0., z)
        return self.o
    
    def backward(self, do):
        # zero the gradient where the ReLU was inactive (without mutating the caller's array)
        di = self.linear.backward(np.where(self.mask, 0., do))
        return di

    def step(self, step=0.01):
        self.linear.step(step=step)  
    

class OneHiddenNet(object):
    def __init__(self, w=10, n=2):
        """
        w : hidden layer width 
        """
        self.w = w
        self.n = n
        self.relu = ReLuLayer(m=w, n=n)
        self.linear = LinearLayer(m=1, n=w)
        self.i = None
        self.o = None
        
    def forward(self, _in):
        self.i = _in
        self.o = self.linear.forward(self.relu.forward(self.i))
        return self.o
    
    def backward(self, do):
        di = self.linear.backward(do)
        di = self.relu.backward(di)
        return di

    def step(self, step=0.1):
        self.linear.step(step=step)
        self.relu.step(step=step)
    
    
class ResNetLayer(object):
    def __init__(self, n=2):
        self.n = n
        self.relu = ReLuLayer(m=1, n=n)
        self.i = None
        self.o = None
        
    def forward(self, _in):
        self.i = _in
        # the single-neuron ReLU output (shape (1,)) is broadcast-added to every input coordinate
        self.o = self.relu.forward(self.i) + self.i
        return self.o
    
    def backward(self, do):
        # because of the broadcast, the gradient w.r.t. the scalar ReLU output is the sum over coordinates
        di = self.relu.backward(np.sum(do, keepdims=True))
        di += do                               # the skip connection passes the gradient through unchanged
        return di

    def step(self, step=0.1):
        self.relu.step(step=step) 
    

class ResNet(object):
    def __init__(self, d=3, n=2):
        """
        d: depth
        n: input
        """
        assert d > 0
        self.d = d 
        self.n = n
        self.layers = [ResNetLayer(n=n) for _ in range(d)]
        self.linear = LinearLayer(m=1, n=n)
        self.i = None
        self.o = None
        
    def forward(self, _in):
        self.i = _in
        for layer in self.layers:
            _in = layer.forward(_in)
        o = self.linear.forward(_in)
        return o
    
    def backward(self, do):
        do = self.linear.backward(do)
        for layer in self.layers[::-1]:
            do = layer.backward(do)
        return do
    
    def step(self, step=0.001):
        self.linear.step(step=step)
        for layer in self.layers[::-1]:
            layer.step(step=step)
    

class ReLuNet(object):
    def __init__(self, d=3, n=2, m=2):
        """
        d: depth
        n: input
        """
        assert d > 0
        self.d = d 
        self.n = n
        self.layers = [ReLuLayer(m=m, n=n)] + [ReLuLayer(m=m, n=m) for _ in range(d-1)]
        self.linear = LinearLayer(m=1, n=m)
        self.i = None
        self.o = None
        
    def forward(self, _in):
        self.i = _in
        for layer in self.layers:
            _in = layer.forward(_in)
        o = self.linear.forward(_in)
        return o
    
    def backward(self, do):
        do = self.linear.backward(do)
        for layer in self.layers[::-1]:
            do = layer.backward(do)
        return do
    
    def step(self, step=0.001):
        self.linear.step(step=step)
        for layer in self.layers[::-1]:
            layer.step(step=step)

def plot(X, y, label=None):
    plt.clf()
    plt.scatter(X[y==0,0], X[y==0,1], c='r')
    plt.scatter(X[y==1,0], X[y==1,1], c='b')
    plt.title(label)
    plt.show()
    
def sigmoid(z):
    return np.exp(z) / (1 + np.exp(z))

def train_net(net, epochs=100, batch_size=100, step=0.01):
    m = Y.shape[0]
    num_batches = (m + batch_size - 1) // batch_size
    for _ in range(epochs):
        total_loss = 0
        for batch in range(num_batches):
            istart = batch * batch_size
            iend = min(m, (batch + 1) * batch_size)
            loss = 0
            for idx in range(istart, iend):
                x = X[idx, :]
                y = Y[idx]
                z = net.forward(x)
                y_pred = sigmoid(z)
                # binary cross-entropy with logits: -y*z + log(1 + exp(z))
                loss += - y * z + np.log(1 + np.exp(z))
                # gradient of the loss w.r.t. the logit z is sigmoid(z) - y
                dloss = y_pred - y
                net.backward(dloss)
            total_loss += loss
            net.step(step=step)
        print("=====loss per epoch: {}".format(total_loss / m))
    
def net_predict(net):
    # classify by thresholding the predicted probability at 0.5 (equivalently, the logit at 0)
    logits = np.vstack([net.forward(x) for x in X])
    return (sigmoid(logits) > 0.5).astype(int)[:, 0]
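Since we rolled our own backward passes, it is worth sanity-checking them against finite differences before trusting the training loop. The snippet below is not part of the original post; it is a minimal sketch that perturbs one output-layer weight of a freshly built OneHiddenNet and compares the analytic gradient with a numerical estimate.

# finite-difference check of the hand-written backward pass (illustrative only)
np.random.seed(0)
check_net = OneHiddenNet(w=5, n=2)
x0, eps = np.random.randn(2), 1e-6
z = check_net.forward(x0)
check_net.backward(np.ones(1))              # with dL/dz = 1, dA accumulates dz/dA
analytic = check_net.linear.dA[0, 0]
check_net.linear.A[0, 0] += eps             # perturb a single output-layer weight
numeric = (check_net.forward(x0) - z)[0] / eps
check_net.linear.A[0, 0] -= eps
print("analytic: {:.6f}   numeric: {:.6f}".format(analytic, numeric))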
# Let's verify that a net with one very wide hidden layer is a universal approximator (at least for our classification task)
np.random.seed(1)
ohnet = OneHiddenNet(w=200, n=2)
train_net(ohnet, epochs=20)
plot(X, net_predict(ohnet), label='One-Hidden-Net')
print("Verified!")
=====loss per epoch: [0.00817961]
=====loss per epoch: [0.00384177]
=====loss per epoch: [0.00359306]
=====loss per epoch: [0.00243583]
=====loss per epoch: [0.00220449]
=====loss per epoch: [0.00118371]
=====loss per epoch: [0.00113231]
=====loss per epoch: [0.0010856]
=====loss per epoch: [0.00104092]
=====loss per epoch: [0.00100063]
=====loss per epoch: [0.00096412]



Verified!
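As an extra sanity check beyond eyeballing the plot (this bit is not in the original post), we can also report the raw training accuracy of the fitted net:

acc = np.mean(net_predict(ohnet) == Y)
print("training accuracy: {:.3f}".format(acc))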
# Let's do the same for a plain ReLU net with multiple narrow hidden layers
np.random.seed(1)
ohnet = ReLuNet(d=4, n=2, m=3)
train_net(ohnet, step=0.001, epochs=1000)
plot(X, net_predict(ohnet), label='ReLuNet-w=n')

=====loss per epoch: [0.01638199]
=====loss per epoch: [0.01564627]
=====loss per epoch: [0.0150567]
=====loss per epoch: [0.01458095]
=====loss per epoch: [0.0141948]
=====loss per epoch: [0.01387983]
=====loss per epoch: [0.01362182]
=====loss per epoch: [0.01340963]
=====loss per epoch: [0.01230008]
=====loss per epoch: [0.01230008]
=====loss per epoch: [0.01230008]
=====loss per epoch: [0.01230008]
=====loss per epoch: [0.01230008]
=====loss per epoch: [0.01230008]
=====loss per epoch: [0.01230008]
=====loss per epoch: [0.01229125]
=====loss per epoch: [0.01228903]
=====loss per epoch: [0.01228637]



# Let's do the same for a ResNet with one-neuron hidden layers and multiple residual blocks
np.random.seed(1)
rnet = ResNet(d=4, n=2)
train_net(rnet, step=0.001, epochs=400)
plot(X, net_predict(rnet), label='ResNet-w=n')

=====loss per epoch: [0.01337021]
=====loss per epoch: [0.01243063]
=====loss per epoch: [0.01195057]
=====loss per epoch: [0.01165113]
=====loss per epoch: [0.01144874]
=====loss per epoch: [0.01130361]
=====loss per epoch: [0.01119548]
=====loss per epoch: [0.01111322]
=====loss per epoch: [0.01041971]
=====loss per epoch: [0.01040687]
=====loss per epoch: [0.00461871]
=====loss per epoch: [0.00461584]
=====loss per epoch: [0.004613]
=====loss per epoch: [0.00461019]
=====loss per epoch: [0.00460739]
=====loss per epoch: [0.00460462]
=====loss per epoch: [0.00460188]
=====loss per epoch: [0.00459917]
=====loss per epoch: [0.00459648]
=====loss per epoch: [0.00459382]



Well, we did not manage to get a perfect decision boundary (we implemented our own optimizer, did not regularize, nor did we explore the architecture), but you can see that the ResNet (400 epochs) fits the data better than the ReLuNet (1000 epochs) when the networks are kept about as narrow as the input dimension of the problem at hand, $n = 2$.
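Finally, a more direct way to compare the two models is to draw the learned decision boundary on a grid rather than just re-coloring the training points. The helper below is not part of the original code, only a minimal sketch of how one might do it with the nets defined above:

def plot_decision_boundary(net, lim=4.0, res=200):
    # evaluate the net's logit on a dense grid and shade the positive region
    xs = np.linspace(-lim, lim, res)
    xx, yy = np.meshgrid(xs, xs)
    grid = np.stack([xx.ravel(), yy.ravel()], axis=1)
    zz = np.array([net.forward(p) for p in grid]).reshape(xx.shape)
    plt.clf()
    plt.contourf(xx, yy, (zz > 0).astype(float), alpha=0.3)
    plt.scatter(X[Y == 0, 0], X[Y == 0, 1], c='r')
    plt.scatter(X[Y == 1, 0], X[Y == 1, 1], c='b')
    plt.title('learned decision boundary')
    plt.show()

plot_decision_boundary(rnet)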