
Commit e0b12bb

Revise package structure to ease usability
This is a user-experience-focused package. Without a cogent API, it would seem hypocritical and insincere to make another crap tool that promises the world.
1 parent 34dcc05 · commit e0b12bb

File tree

12 files changed: +110 −33 lines


README.md

Lines changed: 29 additions & 1 deletion
@@ -4,4 +4,32 @@ This API is undergoing wild changes as it approaches release.
 * Almost every change will be a breaking change.
 * __Do not__ rely on the functions as they currently are
 
-Please feel free to look around, but bookmark which release tag it was. Master will be changing, viciously.
+Please feel free to look around, but bookmark which release tag it was. Master will be changing, viciously.
+
+---
+
+# Understanding [MorphNet](https://arxiv.org/abs/1711.06798)
+
+A Stephen Fox endeavor to become an Applied AI Scientist.
+
+## Setup (to work alongside me)
+
+`git clone https://github.com/stephenjfox/Morph.py.git`
+
+## Requisites
+
+### [Install Anaconda](https://www.anaconda.com/download/)
+* They've made it easier over the years. If you haven't already, please give it a try.
+
+### Install Pip
+
+1. `conda install pip`
+2. Proceed as normal
+
+### Dependencies
+
+- Jupyter Notebook
+  * And a few tools to make it better in your local environment, like `nb_conda`, `nbconvert`, and `nb_conda_kernels`
+- Python 3.6+, because [Python 2 is dying](https://pythonclock.org/)
+- PyTorch (`conda install pytorch torchvision -c pytorch`)

demo.py

Lines changed: 17 additions & 2 deletions
@@ -1,8 +1,23 @@
-from .my_morph import MorphNet
+import torch
+import torch.nn as nn
+from torch.utils.data import TensorDataset, DataLoader
 
+import morph
+import morph.nn as net
+from morph.layers.sparse import sparsify
+
+from morph._models import EasyMnist
 
 
 def main():
-    pass
+    my_model = EasyMnist()
+    # do one pass through the algorithm
+    modified = morph.once(my_model)
+
+    my_dataloader = DataLoader(TensorDataset(torch.randn(2, 28, 28)))
+
+    # get back the class that will do work
+    morphed = net.Morph(my_model, epochs=5, dataloader=my_dataloader)
+    morphed.run_training()
 
 
 if __name__ == '__main__':

morph/__init__.py

Lines changed: 1 addition & 2 deletions
@@ -1,2 +1 @@
-from .morph import morph
-from .nn.morph_net import *
+from .nn.morph import once  # facilitate "morph.once"

morph/_models.py

Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class EasyMnist(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.linear1 = nn.Linear(784, 1000)
+        self.linear2 = nn.Linear(1000, 30)
+        self.linear3 = nn.Linear(30, 10)
+
+    def forward(self, x_batch: torch.Tensor):
+        """Simple ReLU-based activations through all layers of the DNN.
+        A simple, effective deep neural network. No frills.
+        """
+        _input = x_batch.view(-1, 784)  # shape for our linear1
+        out1 = F.relu(self.linear1(_input))  # feed the flattened batch, not the raw images
+        out2 = F.relu(self.linear2(out1))
+        out3 = F.relu(self.linear3(out2))
+
+        return out3
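
As a quick shape check of the new model (a sketch for illustration, not part of this commit), the fixed `forward` flattens each 28×28 image to 784 features and ends in a 10-way output:

```python
import torch

from morph._models import EasyMnist

model = EasyMnist()
fake_batch = torch.randn(2, 28, 28)  # two fake MNIST-sized images
logits = model(fake_batch)
assert logits.shape == (2, 10)  # one 10-way output row per image
```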

morph/layers/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -1,2 +1,2 @@
-from .sparsify import *
+from .sparse import *
 from .widen import widen
morph/layers/sparsify.py → morph/layers/sparse.py

File renamed without changes.

morph/morph.py

Lines changed: 0 additions & 10 deletions
This file was deleted.

morph/nn/__init__.py

Lines changed: 2 additions & 0 deletions
@@ -0,0 +1,2 @@
+from ._morph_net import Morph
+from .morph import once

morph/nn/_morph_net.py

Lines changed: 25 additions & 0 deletions
@@ -0,0 +1,25 @@
+from morph.layers.sparse import percent_waste
+import torch.nn as nn
+from torch.utils.data import DataLoader
+
+
+class Morph(nn.Module):
+    """An encapsulation of the benefits of MorphNet, namely:
+    1. Automatic shrinking and widening, to produce a new architecture w.r.t. layer widths
+    2. Training of the network, to match (or beat) the original model's performance
+    """
+
+    @classmethod
+    def shrink_out(cls, child_layer):
+        new_out = int(child_layer.out_features * percent_waste(child_layer))
+        return nn.Linear(child_layer.in_features, new_out)
+
+    def __init__(self, net: nn.Module, epochs: int, dataloader: DataLoader):
+        super().__init__()
+        self.layers = nn.ModuleList([
+            Morph.shrink_out(c) for c in net.children()
+        ])
+
+    def run_training(self):
+        """Performs the managed training for this instance"""
+        pass
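
`percent_waste` comes from `morph.layers.sparse`, whose body this commit doesn't show (the file was renamed without changes). Since `shrink_out` multiplies `out_features` by its return value, it presumably yields the fraction of a layer's weights doing useful work; a minimal sketch under that assumption, with the `threshold` parameter being a guess:

```python
import torch.nn as nn

def percent_waste(layer: nn.Linear, threshold: float = 1e-3) -> float:
    """Assumed behavior: the fraction of weights whose magnitude clears
    `threshold`. Multiplying out_features by this "useful" fraction is
    what lets shrink_out produce a narrower replacement layer."""
    weights = layer.weight.detach()
    return (weights.abs() > threshold).float().mean().item()
```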

morph/nn/morph.py

Lines changed: 12 additions & 0 deletions
@@ -0,0 +1,12 @@
+import torch.nn as nn
+
+
+def once(net: nn.Module, experimental_support=False) -> nn.Module:
+    """Runs an experimental implementation of the MorphNet algorithm on `net`,
+    producing a new network:
+    1. Shrink the layers of `net`
+
+    Returns: either `net` if `experimental_support == False`, or a MorphNet of
+    the supplied `net`.
+    """
+    # TODO: run the algorithm
+    return net
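
The body is still a TODO. One way the shrink step might eventually look, reusing `Morph.shrink_out` from `_morph_net.py` above — a sketch only, assuming every child of `net` is an `nn.Linear` (as in `EasyMnist`) and that the `experimental_support` flag gates the rewrite:

```python
import torch.nn as nn

from morph.nn._morph_net import Morph

def once(net: nn.Module, experimental_support=False) -> nn.Module:
    if not experimental_support:
        return net  # stay inert until the caller opts in

    # Shrink step: narrow each Linear's output by its estimated useful
    # fraction, threading the new widths through so consecutive layers
    # still line up. The widen step would follow in a later revision.
    shrunk = []
    in_features = None
    for child in net.children():
        narrowed = Morph.shrink_out(child)
        if in_features is not None:
            narrowed = nn.Linear(in_features, narrowed.out_features)
        in_features = narrowed.out_features
        shrunk.append(narrowed)
    return nn.Sequential(*shrunk)
```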
