
compiler errors - AttributeError: 'Model' object has no attribute 'predict_classes'

I'm trying to predict on the validation data with pre-trained and fine-tuned DL models. The code follows the example available in the Keras blog on "building image classification models using very little data". Here is the code:

import numpy as np
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.models import Model
from keras.layers import Flatten, Dense
from sklearn.metrics import classification_report,confusion_matrix
from sklearn.metrics import roc_auc_score
import itertools
from keras.optimizers import SGD
from sklearn.metrics import roc_curve, auc
from keras import applications
from keras import backend as K
K.set_image_dim_ordering('tf')

# Plotting the confusion matrix
def plot_confusion_matrix(cm, classes,
                          normalize=False,  # if True, all values in the confusion matrix are between 0 and 1
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    # Normalize first so that the plotted image matches the printed values
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')

# Plot the ROC curve and report the AUC
def generate_results(validation_labels, y_pred):
    fpr, tpr, _ = roc_curve(validation_labels, y_pred) ##(this implementation is restricted to a binary classification task)
    roc_auc = auc(fpr, tpr)
    plt.figure()
    plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate (FPR)')
    plt.ylabel('True Positive Rate (TPR)')
    plt.title('Receiver operating characteristic (ROC) curve')
    plt.show()
    print('Area Under the Curve (AUC): %f' % roc_auc)

img_width, img_height = 100,100
top_model_weights_path = 'modela.h5'
train_data_dir = 'data4/train'
validation_data_dir = 'data4/validation'
nb_train_samples = 20
nb_validation_samples = 20
epochs = 50
batch_size = 10
def save_bottleneck_features():
   datagen = ImageDataGenerator(rescale=1. / 255)
   model = applications.VGG16(include_top=False, weights='imagenet', input_shape=(100,100,3))
   generator = datagen.flow_from_directory(
               train_data_dir,
               target_size=(img_width, img_height),
               batch_size=batch_size,
               class_mode='binary',
               shuffle=False)
   bottleneck_features_train = model.predict_generator(
               generator, nb_train_samples // batch_size)
   np.save(open('bottleneck_features_train', 'wb'),bottleneck_features_train)

   generator = datagen.flow_from_directory(
               validation_data_dir,
               target_size=(img_width, img_height),
               batch_size=batch_size,
               class_mode='binary',
               shuffle=False)
   bottleneck_features_validation = model.predict_generator(
               generator, nb_validation_samples // batch_size)
   np.save(open('bottleneck_features_validation', 'wb'),bottleneck_features_validation)

def train_top_model():
   train_data = np.load(open('bottleneck_features_train', 'rb'))
   train_labels = np.array([0] * (nb_train_samples // 2) + [1] * (nb_train_samples // 2))
   validation_data = np.load(open('bottleneck_features_validation', 'rb'))
   validation_labels = np.array([0] * (nb_validation_samples // 2) + [1] * (nb_validation_samples // 2))
   model = Sequential()
   model.add(Flatten(input_shape=train_data.shape[1:]))
   model.add(Dense(512, activation='relu'))
   model.add(Dense(1, activation='sigmoid'))
   sgd = SGD(lr=1e-3, decay=0.00, momentum=0.99, nesterov=False) 
   model.compile(optimizer=sgd,
         loss='binary_crossentropy', metrics=['accuracy'])
   model.fit(train_data, train_labels,
             epochs=epochs,
             batch_size=batch_size,
             validation_data=(validation_data, validation_labels))
   model.save_weights(top_model_weights_path)
   print('Predicting on test data')
   y_pred = model.predict_classes(validation_data)
   print(y_pred.shape)
   print('Generating results')
   generate_results(validation_labels[:,], y_pred[:,])
   print('Generating the ROC_AUC_Scores') #Compute Area Under the Curve (AUC) from prediction scores
   print(roc_auc_score(validation_labels,y_pred)) #this implementation is restricted to the binary classification task or multilabel classification task in label indicator format.
   target_names = ['class 0(Normal)', 'class 1(Abnormal)']
   print(classification_report(validation_labels,y_pred,target_names=target_names))
   print(confusion_matrix(validation_labels,y_pred))
   cnf_matrix = (confusion_matrix(validation_labels,y_pred))
   np.set_printoptions(precision=2)
   plt.figure()
   # Plot non-normalized confusion matrix
   plot_confusion_matrix(cnf_matrix, classes=target_names,
                      title='Confusion matrix')
   plt.show()
save_bottleneck_features()
train_top_model()

# path to the model weights files.
weights_path = '../keras/examples/vgg16_weights.h5'
top_model_weights_path = 'modela.h5'
# dimensions of our images.
img_width, img_height = 100, 100
train_data_dir = 'data4/train'
validation_data_dir = 'data4/validation'
nb_train_samples = 20
nb_validation_samples = 20
epochs = 50
batch_size = 10

# build the VGG16 network
base_model = applications.VGG16(weights='imagenet', include_top=False, input_shape=(100,100,3))
print('Model loaded.')

train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='binary') 
top_model = Sequential()
top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
top_model.add(Dense(512, activation='relu'))
top_model.add(Dense(1, activation='sigmoid'))  # single-unit sigmoid to match the bottleneck top model above
top_model.load_weights(top_model_weights_path)
model = Model(inputs=base_model.input, outputs=top_model(base_model.output))
# set the first 15 layers (up to the last conv block)
# to non-trainable (weights will not be updated)
for layer in model.layers[:15]:  # up to the layer before the last convolution block
    layer.trainable = False
model.summary()
# fine-tune the model
model.compile(loss='binary_crossentropy', optimizer=SGD(lr=1e-4, momentum=0.99), metrics=['accuracy'])
model.fit_generator(train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size,
    verbose=1)
model.save_weights(top_model_weights_path)
bottleneck_features_validation = model.predict_generator(validation_generator, nb_validation_samples // batch_size)
np.save(open('bottleneck_features_validation','wb'), bottleneck_features_validation)
validation_data = np.load(open('bottleneck_features_validation', 'rb'))
y_pred1 = model.predict_classes(validation_data)

The problem is that the pre-trained model trains on the data, predicts the classes perfectly, and produces the confusion matrix as expected. But as I proceed to fine-tuning the model, I find that model.predict_classes no longer works. Here is the error:

File "C:/Users/rajaramans2/codes/untitled12.py", line 220, in <module>
    y_pred1 = model.predict_classes(validation_data)

AttributeError: 'Model' object has no attribute 'predict_classes'

I am confused because model.predict_classes worked well with the pre-trained model but not in the fine-tuning stage. The validation data has shape (20, 1) and is of type float32. Any help would be appreciated.


1 Reply


The predict_classes method is only available for the Sequential class (which is the class of your first model) but not for the Model class (the class of your second model).

With the Model class, you can use the predict method, which gives you a vector of probabilities, and then convert those probabilities to class labels yourself: for a single sigmoid output unit, threshold at 0.5; for a softmax output with one unit per class, take the argmax along the class axis (np.argmax(y_pred1, axis=1)).
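
For example, here is a minimal sketch of that conversion for the fine-tuned model above (only model, validation_generator, nb_validation_samples and batch_size come from your code; the other variable names are illustrative):

import numpy as np

# predict / predict_generator return probabilities rather than class labels
probs = model.predict_generator(validation_generator,
                                nb_validation_samples // batch_size)

# Single-unit sigmoid output of shape (N, 1): threshold the probability at 0.5
y_pred1 = (probs > 0.5).astype('int32').ravel()

# If the last layer were instead a multi-unit softmax of shape (N, num_classes),
# the equivalent conversion would be the argmax over the class axis:
# y_pred1 = np.argmax(probs, axis=1)

Thresholding at 0.5 for a single output unit and taking the argmax for multi-unit outputs is what Sequential's predict_classes does internally, so this reproduces its behaviour for a functional Model.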

