I have a problem where the accuracy of my k-fold does not return to 0, but continues the accuracy of the last epoch on the previous fold, please find a solution
Below is the code for the image generator
IMAGE_SHAPE = (224, 224)
TRAINING_DATA_DIR = str(directory_root)
datagen_kwargs = dict(
rescale = 1. / 255,
rotation_range=25,
shear_range=0.2,
horizontal_flip=True,
validation_split=.20)
# Make Validation Datagen
valid_datagen = tf.keras.preprocessing.image.ImageDataGenerator(**datagen_kwargs)
valid_generator = valid_datagen.flow_from_directory(
TRAINING_DATA_DIR,
subset='validation',
shuffle=True,
target_size=IMAGE_SHAPE,
batch_size=100)
# Make Train Datagen
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(**datagen_kwargs)
train_generator = train_datagen.flow_from_directory(
TRAINING_DATA_DIR,
subset='training',
shuffle=True,
target_size=IMAGE_SHAPE,
batch_size=100)
Below is the code for k-fold
for train, test in kfold.split(inputs, targets):
# Define callbacks
checkpoint_path = f'/content/drive/MyDrive/Colab Notebooks/saveModel/Model 1/{fold_no}'
os.mkdir(checkpoint_path)
keras_callbacks = [
ModelCheckpoint(checkpoint_path, monitor='val_loss', save_best_only=True, mode='min')
]
x_t, x_ts = inputs[train] , targets[test]
y_t, y_ts = inputs[train] , targets[test]
model_history = model.fit(
train_generator,
epochs=EPOCHS,
verbose=1,
steps_per_epoch=steps_per_epoch,
validation_data=valid_generator,
validation_steps=val_steps_per_epoch,
callbacks=keras_callbacks).history
question from:
https://stackoverflow.com/questions/65843778/k-fold-accuracy-does-not-return-to-0 与恶龙缠斗过久,自身亦成为恶龙;凝视深渊过久,深渊将回以凝视…