Skip to content
This repository was archived by the owner on Nov 9, 2022. It is now read-only.

Commit 456dcbd

Browse files
authored
Merge branch 'master' into quickstart
2 parents 847737d + 2197dc5 commit 456dcbd

File tree

12 files changed

+460
-28
lines changed

12 files changed

+460
-28
lines changed

libs/config.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,10 @@
2222
(200, 130, 0) : 6,
2323
}
2424

25+
LABELMAP_RGB = { k: (v[2], v[1], v[0]) for k, v in LABELMAP.items() }
26+
27+
INV_LABELMAP_RGB = { v: k for k, v in LABELMAP_RGB.items() }
28+
2529
train_ids = [
2630
"1d4fbe33f3_F1BE1D4184INSPIRE",
2731
"1df70e7340_4413A67E91INSPIRE",

libs/datasets.py

Lines changed: 0 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,3 @@
1-
import os
2-
from fastai.vision import *
3-
from fastai.callbacks.hooks import *
4-
from pathlib import PosixPath
5-
6-
import numpy as np
7-
from libs.config import LABELS
81
import libs.images2chips
92
import sys
103
import os
@@ -43,18 +36,3 @@ def download_dataset(dataset):
4336
libs.images2chips.run(dataset)
4437
else:
4538
print(f'chip folders "{image_chips}" and "{label_chips}" already exist, remove them to recreate chips.')
46-
47-
def load_dataset(dataset, training_chip_size, bs):
48-
""" Load a dataset, create batches and augmentation """
49-
50-
path = PosixPath(dataset)
51-
label_path = path/'label-chips'
52-
image_path = path/'image-chips'
53-
image_files = get_image_files(image_path)
54-
label_files = get_image_files(label_path)
55-
get_y_fn = lambda x: label_path/f'{x.stem}{x.suffix}'
56-
codes = np.array(LABELS)
57-
src = SegmentationItemList.from_folder(image_path).split_by_fname_file('../valid.txt').label_from_func(get_y_fn, classes=codes)
58-
# some data augmentation here
59-
data = src.transform(get_transforms(flip_vert=True, max_warp=0., max_zoom=0., max_rotate=180.), size=training_chip_size, tfm_y=True).databunch(bs=bs)
60-
return data

libs/datasets_fastai.py

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
from fastai.vision import *
2+
from fastai.callbacks.hooks import *
3+
from pathlib import PosixPath
4+
5+
import numpy as np
6+
from libs.config import LABELS
7+
8+
def load_dataset(dataset, training_chip_size, bs):
    """Load a chipped dataset as a fastai DataBunch with augmentation.

    Args:
        dataset: path to the dataset root (contains image-chips/ and label-chips/).
        training_chip_size: square size chips are resized to for training.
        bs: batch size.

    Returns:
        A fastai DataBunch of (image, label-mask) segmentation pairs.
    """
    path = PosixPath(dataset)
    label_path = path / 'label-chips'
    image_path = path / 'image-chips'
    # Label chips live next to image chips under the same file name.
    get_y_fn = lambda x: label_path / f'{x.stem}{x.suffix}'
    codes = np.array(LABELS)
    # '../valid.txt' lists validation chip file names relative to image-chips/.
    src = SegmentationItemList.from_folder(image_path).split_by_fname_file('../valid.txt').label_from_func(get_y_fn, classes=codes)
    # Flips and rotations only -- no warp/zoom so masks stay aligned (tfm_y=True
    # applies the same transform to the label mask).
    data = src.transform(get_transforms(flip_vert=True, max_warp=0., max_zoom=0., max_rotate=180.), size=training_chip_size, tfm_y=True).databunch(bs=bs)
    return data

libs/datasets_keras.py

Lines changed: 65 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,65 @@
1+
from keras.preprocessing.image import ImageDataGenerator
2+
from keras.utils import Sequence, to_categorical
3+
from PIL import Image
4+
5+
import numpy as np
6+
import random
7+
8+
def load_dataset(dataset, bs, aug={'horizontal_flip': True, 'vertical_flip': True, 'rotation_range': 180}):
    """Build training and validation batch sequences for a chipped dataset.

    The training sequence is augmented with `aug`; the validation sequence
    is not. (NOTE(review): `aug` is a mutable default argument -- harmless
    here because it is never mutated, and kept for interface compatibility.)

    Returns:
        (train_seq, valid_seq) pair of SegmentationSequence objects.
    """
    def chip_paths(listing):
        # Each listing file names one chip per line, relative to image-chips/.
        return [f'{dataset}/image-chips/{fname}' for fname in load_lines(f'{dataset}/{listing}')]

    train_seq = SegmentationSequence(dataset, chip_paths('train.txt'), ImageDataGenerator(**aug), bs)
    # don't augment validation set
    valid_seq = SegmentationSequence(dataset, chip_paths('valid.txt'), ImageDataGenerator(), bs)
    return train_seq, valid_seq
27+
28+
def load_lines(fname):
    """Read a text file and return its lines stripped of surrounding whitespace."""
    with open(fname, 'r') as f:
        return [line.strip() for line in f]
31+
32+
def load_img(fname):
    """Load an image file as a numpy array.

    Uses a context manager so the underlying file handle is closed promptly;
    np.array() forces PIL's lazy load before the file is closed.
    """
    with Image.open(fname) as img:
        return np.array(img)
34+
35+
def mask_to_classes(mask):
    """One-hot encode the first channel of a label mask into 6 class planes."""
    return to_categorical(mask[:, :, 0], num_classes=6)
37+
38+
class SegmentationSequence(Sequence):
    """Keras Sequence yielding (images, one-hot labels) batches for segmentation.

    Label chips are located by mirroring each image path from image-chips/
    into label-chips/. The same random transform is applied to an image and
    its label mask so the pair stays spatially aligned.
    """

    def __init__(self, dataset, image_files, datagen, bs):
        self.label_path = f'{dataset}/label-chips'
        self.image_path = f'{dataset}/image-chips'
        self.image_files = image_files
        # NOTE(review): shuffles the caller's list in place.
        random.shuffle(self.image_files)

        self.datagen = datagen
        self.bs = bs

    def __len__(self):
        # Number of batches per epoch; the final batch may be short.
        return int(np.ceil(len(self.image_files) / float(self.bs)))

    def __getitem__(self, idx):
        batch_files = self.image_files[idx * self.bs:(idx + 1) * self.bs]
        label_files = [fname.replace(self.image_path, self.label_path) for fname in batch_files]

        images = [load_img(fname) for fname in batch_files]
        labels = [mask_to_classes(load_img(fname)) for fname in label_files]

        # One random transform per image, shared with its label mask.
        transforms = [self.datagen.get_random_transform(img.shape) for img in images]
        images = [self.datagen.apply_transform(img, t) for img, t in zip(images, transforms)]
        labels = [self.datagen.apply_transform(lbl, t) for lbl, t in zip(labels, transforms)]

        return np.array(images), np.array(labels)

    def on_epoch_end(self):
        random.shuffle(self.image_files)

libs/inference_keras.py

Lines changed: 71 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,71 @@
1+
from PIL import Image
2+
import numpy as np
3+
import math
4+
from keras import models
5+
import os
6+
7+
from libs.config import train_ids, test_ids, val_ids, LABELMAP_RGB
8+
9+
def category2mask(img):
    """Convert a category image (integer class ids) to an RGB color mask.

    Accepts a 2-D category image, or a 3-channel image whose channels carry
    the category id (only channel 0 is used).

    Returns:
        uint8 HxWx3 array colored per LABELMAP_RGB; unmapped ids stay black.
    """
    # BUG FIX: the original tested len(img) == 3, which is the number of
    # ROWS of the array, not its number of dimensions.
    if img.ndim == 3:
        if img.shape[2] == 3:
            img = img[:, :, 0]

    mask = np.zeros(img.shape[:2] + (3, ), dtype='uint8')

    for category, mask_color in LABELMAP_RGB.items():
        locs = np.where(img == category)
        mask[locs] = mask_color

    return mask
22+
23+
def chips_from_image(img, size=300):
    """Cut an image array into size x size chips, zero-padding the edges.

    Args:
        img: HxWxC numpy array.
        size: chip side length in pixels.

    Returns:
        List of (chip, x, y) tuples where (x, y) is the chip's top-left
        corner in the original image and every chip is exactly size x size x C.
    """
    shape = img.shape

    # (Removed an unused `chip_count` computation from the original.)
    chips = []
    # Column-major walk: all y chips for x=0, then x=size, ...
    for x in range(0, shape[1], size):
        for y in range(0, shape[0], size):
            chip = img[y:y+size, x:x+size, :]
            # Pad the bottom/right edge chips out to a full size x size.
            y_pad = size - chip.shape[0]
            x_pad = size - chip.shape[1]
            chip = np.pad(chip, [(0, y_pad), (0, x_pad), (0, 0)], mode='constant')
            chips.append((chip, x, y))
    return chips
37+
38+
def run_inference_on_file(imagefile, predsfile, model, size=300):
    """Predict a color mask for one ortho image and save it to predsfile.

    Args:
        imagefile: path to the input image.
        predsfile: output path for the rendered prediction mask (PNG).
        model: a loaded keras model accepting batches of size x size chips.
        size: chip side length; must match the model's input size.
    """
    # BUG FIX: the original opened and decoded the image file twice
    # (`with Image.open(...)` and then `Image.open(...)` again inside it).
    with Image.open(imagefile) as img:
        nimg = np.array(img.convert('RGB'))
    shape = nimg.shape
    chips = chips_from_image(nimg)

    # Drop all-black chips (empty padding regions) before predicting.
    chips = [(chip, xi, yi) for chip, xi, yi in chips if chip.sum() > 0]
    prediction = np.zeros(shape[:2], dtype='uint8')
    chip_preds = model.predict(np.array([chip for chip, _, _ in chips]), verbose=True)

    for (chip, x, y), pred in zip(chips, chip_preds):
        # +1 shifts argmax classes off 0 -- presumably 0 means "no
        # prediction"; confirm against LABELMAP.
        category_chip = np.argmax(pred, axis=-1) + 1
        # Crop each chip prediction back to the region it covers.
        section = prediction[y:y+size, x:x+size].shape
        prediction[y:y+size, x:x+size] = category_chip[:section[0], :section[1]]

    mask = category2mask(prediction)
    Image.fromarray(mask).save(predsfile)
55+
56+
def run_inference(dataset, model=None, model_path=None, basedir='.'):
    """Run inference over every known scene in the dataset.

    One of `model` (a loaded keras model) or `model_path` (a saved model
    file) must be supplied. Predictions are written to
    `{basedir}/{scene}-prediction.png`; scenes without an image on disk
    are skipped.
    """
    if model is None:
        if model_path is None:
            raise Exception("model or model_path required")
        model = models.load_model(model_path)

    for scene in train_ids + val_ids + test_ids:
        imagefile = f'{dataset}/images/{scene}-ortho.tif'
        predsfile = os.path.join(basedir, f'{scene}-prediction.png')

        if not os.path.exists(imagefile):
            continue

        print(f'running inference on image {imagefile}.')
        run_inference_on_file(imagefile, predsfile, model)

libs/models_keras.py

Lines changed: 128 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,128 @@
1+
from keras import layers, models
2+
import numpy as np
3+
import tensorflow as tf
4+
5+
def build_unet(size=300, basef=64, maxf=512, encoder='resnet50', pretrained=True):
    """Assemble a U-Net: an encoder backbone plus an upsampling decoder.

    Args:
        size: input side length (inputs are size x size RGB).
        basef: decoder filter count at the finest scale, doubled per scale.
        maxf: cap on decoder filters.
        encoder: backbone name understood by make_encoder().
        pretrained: load imagenet weights into the backbone.

    Returns:
        A keras Model mapping (size, size, 3) images to 6-class softmax maps.
    """
    input = layers.Input((size, size, 3))

    encoder_model = make_encoder(input, name=encoder, pretrained=pretrained)

    # Pick one skip-connection layer per spatial scale; a later layer at
    # the same scale replaces an earlier one, so we keep the deepest.
    crosses = []
    for layer in encoder_model.layers:
        # don't end a scale on a padding layer
        if type(layer) == layers.ZeroPadding2D:
            continue
        idx = get_scale_index(size, layer.output_shape[1])
        if idx is None:
            continue
        if idx >= len(crosses):
            crosses.append(layer)
        else:
            crosses[idx] = layer

    # Decoder: walk back up the scales, concatenating skip connections.
    x = crosses[-1].output
    for scale in reversed(range(len(crosses) - 1)):
        nf = min(basef * 2 ** scale, maxf)
        x = act(upscale(x, nf))
        # Encoder/decoder sizes can differ by a few pixels -- reconcile both.
        x = layers.Concatenate()([
            pad_to_scale(x, scale, size=size),
            pad_to_scale(crosses[scale].output, scale, size=size),
        ])
        x = act(conv(x, nf))

    x = conv(x, 6)
    x = layers.Activation('softmax')(x)

    return models.Model(input, x)
40+
41+
def make_encoder(input, name='resnet50', pretrained=True):
    """Build a headless classification backbone on the given input tensor.

    Supported names: resnet18, resnet50, resnet101, resnet152, vgg16, vgg19.
    Imports happen per-branch so only the selected backbone's module loads.

    Raises:
        Exception: if `name` is not a recognized encoder.
    """
    weights = 'imagenet' if pretrained else None

    if name == 'resnet18':
        from classification_models.keras import Classifiers
        ResNet18, _ = Classifiers.get('resnet18')
        return ResNet18(weights=weights, input_tensor=input, include_top=False)
    if name == 'resnet50':
        from keras.applications.resnet import ResNet50
        return ResNet50(weights=weights, input_tensor=input, include_top=False)
    if name == 'resnet101':
        from keras.applications.resnet import ResNet101
        return ResNet101(weights=weights, input_tensor=input, include_top=False)
    if name == 'resnet152':
        from keras.applications.resnet import ResNet152
        return ResNet152(weights=weights, input_tensor=input, include_top=False)
    if name == 'vgg16':
        from keras.applications.vgg16 import VGG16
        return VGG16(weights=weights, input_tensor=input, include_top=False)
    if name == 'vgg19':
        from keras.applications.vgg19 import VGG19
        return VGG19(weights=weights, input_tensor=input, include_top=False)

    raise Exception(f'unknown encoder {name}')
89+
90+
def get_scale_index(in_size, l_size):
    """Map a layer's spatial size to its pyramid level.

    Returns the smallest i in 0..7 such that in_size // 2**i is within 4
    pixels of l_size, or None when no level matches.
    """
    return next(
        (i for i in range(8) if abs(l_size - in_size // (2 ** i)) <= 4),
        None,
    )
96+
97+
def pad_to_scale(x, scale, size=300):
    """Reflect-pad or center-crop x so its spatial size matches its scale.

    The expected side length at a scale is ceil(size / 2**scale); encoder
    and decoder paths can disagree by a few pixels, reconciled here.
    """
    expected = int(np.ceil(size / (2. ** scale)))
    diff = expected - int(x.shape[1])
    if diff > 0:
        # Too small: reflect-pad, splitting the difference left/right.
        left = diff // 2
        return reflectpad(x, (left, diff - left))
    if diff < 0:
        # Too large: center-crop.
        left = -diff // 2
        right = -diff - left
        return layers.Cropping2D(((left, right), (left, right)))(x)
    return x
109+
110+
def reflectpad(x, pad):
    """Reflection-pad a NHWC tensor by `pad` = (left, right) pixels on H and W."""
    def _pad(t):
        return tf.pad(t, [(0, 0), pad, pad, (0, 0)], 'REFLECT')
    return layers.Lambda(_pad)(x)
112+
113+
def upscale(x, nf):
    """Double spatial resolution, then project to nf channels with a 1x1 conv."""
    upsampled = layers.UpSampling2D((2, 2))(x)
    return conv(upsampled, nf, kernel_size=(1, 1))
117+
118+
def act(x):
    """Post-conv activation block: batch norm followed by LeakyReLU(0.2)."""
    normed = layers.BatchNormalization()(x)
    return layers.LeakyReLU(0.2)(normed)
122+
123+
def conv(x, nf, kernel_size=(3, 3), **kwargs):
    """'Same'-size convolution using reflection padding instead of zero padding.

    NOTE(review): padding is derived from kernel_size[0] only, so non-square
    kernels are padded equally on both axes -- confirm that is intended.
    """
    total = kernel_size[0] - 1
    left = total // 2
    right = total - left
    if left > 0 or right > 0:
        x = reflectpad(x, (left, right))
    return layers.Conv2D(nf, kernel_size=kernel_size, padding='valid', **kwargs)(x)

libs/scoring.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -52,10 +52,11 @@ def plot_confusion_matrix(y_true, y_pred, classes,
5252
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
5353
ax.figure.colorbar(im, ax=ax)
5454

55+
base, fname = os.path.split(title)
5556
ax.set(xticks=np.arange(cm.shape[1]),
5657
yticks=np.arange(cm.shape[0]),
5758
xticklabels=classes, yticklabels=classes,
58-
title=title,
59+
title=fname,
5960
ylabel='True label',
6061
xlabel='Predicted label')
6162

@@ -79,6 +80,7 @@ def plot_confusion_matrix(y_true, y_pred, classes,
7980
if not os.path.isdir(savedir):
8081
os.mkdir(savedir)
8182
savefile = savedir + '/score-' + title
83+
8284
plt.savefig(savefile)
8385
return savefile, cm
8486

@@ -117,7 +119,7 @@ def score_masks(labelfile, predictionfile):
117119

118120
return precision, recall, f1, savefile
119121

120-
def score_predictions(dataset):
122+
def score_predictions(dataset, basedir='.'):
121123

122124
scores = []
123125

@@ -132,7 +134,7 @@ def score_predictions(dataset):
132134
for scene in test_ids:
133135

134136
labelfile = f'{dataset}/labels/{scene}-label.png'
135-
predsfile = f"{scene}-prediction.png"
137+
predsfile = os.path.join(basedir, f"{scene}-prediction.png")
136138

137139
if not os.path.exists(labelfile):
138140
continue

libs/training.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
from libs import inference
99
from libs import scoring
1010
from libs.util import MySaveModelCallback, ExportCallback, MyCSVLogger, Precision, Recall, FBeta
11-
from libs import datasets
11+
from libs import datasets_fastai
1212

1313
import wandb
1414
from wandb.fastai import WandbCallback
@@ -41,7 +41,7 @@ def train_model(dataset):
4141
FBeta(average='weighted', beta=1, clas_idx=1),
4242
]
4343

44-
data = datasets.load_dataset(dataset, size, bs)
44+
data = datasets_fastai.load_dataset(dataset, size, bs)
4545
encoder_model = models.resnet18
4646
learn = unet_learner(data, encoder_model, path='models', metrics=metrics, wd=wd, bottle=True, pretrained=pretrained)
4747

0 commit comments

Comments
 (0)