I am following F. Chollet's book "Deep Learning with Python" and can't get one of the examples working.
In particular, I am running an example from the chapter "Training a convnet from scratch on a small dataset".
My training dataset has 2,000 samples, and I am trying to extend it with augmentation using ImageDataGenerator. Even though my code is exactly the same as in the book, I am getting this error:
Your input ran out of data; interrupting training. Make sure that your
dataset or generator can generate at least steps_per_epoch * epochs
batches (in this case, 10000 batches).
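If I read the message right, it is asking for steps_per_epoch * epochs = 100 * 100 = 10,000 batches, while my 2,000 training images at batch_size=32 only give about 63 distinct batches per pass over the data. A quick back-of-the-envelope check (my own sketch, not from the book; the variable names are just for illustration):

import math

train_samples = 2000    # size of my training set
batch_size = 32         # batch_size passed to flow_from_directory below
steps_per_epoch = 100   # value used in fit_generator below
epochs = 100

batches_per_pass = math.ceil(train_samples / batch_size)  # 63
batches_requested = steps_per_epoch * epochs              # 10000
print(batches_per_pass, batches_requested)

I thought the augmented generator was supposed to yield batches indefinitely, though, so I don't see why it runs out. Here is my full code: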
from keras import layers
from keras import models
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
# creating model
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',
                        input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
# model compilation
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-4),
              metrics=['acc'])
# model.summary()
# generating trains and test sets with rescaling 0-255 -> 0-1
# Raw strings so the backslashes are not treated as escape sequences
train_dir = r'c:\Work\Code\Python\DL\cats_and_dogs_small\train'
validation_dir = r'c:\Work\Code\Python\DL\cats_and_dogs_small\validation'
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,)
# Note that the validation data should not be augmented!
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
    # This is the target directory
    train_dir,
    # All images will be resized to 150x150
    target_size=(150, 150),
    batch_size=32,
    # Since we use binary_crossentropy loss, we need binary labels
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(150, 150),
    batch_size=32,
    class_mode='binary')
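# Sanity check I added while debugging (not from the book): flow_from_directory
# returns a finite Sequence, so len() gives the number of batches per epoch.
# With 2,000 training images and batch_size=32 I expect this to print 63.
print(len(train_generator))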
for data_batch, labels_batch in train_generator:
    print('data batch shape:', data_batch.shape)
    print('labels batch shape:', labels_batch.shape)
    break
history = model.fit_generator(
    train_generator,
    steps_per_epoch=100,
    epochs=100,
    validation_data=validation_generator,
    validation_steps=50)
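The only workaround I have found so far is to derive the step counts from the generators instead of hard-coding them (my own guess, not from the book):

history = model.fit_generator(
    train_generator,
    steps_per_epoch=len(train_generator),          # 63 batches with 2,000 images
    epochs=100,
    validation_data=validation_generator,
    validation_steps=len(validation_generator))

That seems to avoid the error, but I still don't understand why the book's hard-coded steps_per_epoch=100 fails for me.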
Here is the link to the GitHub page with the book's samples, where you can check the code as well.
I am not sure what I am doing wrong and would appreciate any advice. Thank you.