I am running a CNN that processes images but does not classify them. Instead of class scores, the output layer is a Dense layer whose size equals the number of pixels of a label image flattened to 1D.
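To make the shapes concrete: each label is a 2D array that I flatten into a 1D target vector, and the Dense output has one unit per element (19316 in my case). A minimal sketch of what I mean (the 2D label shape below is made up; only the flattened length matches my real data):

import numpy as np

label = np.zeros((4829, 4))   # hypothetical 2D label image
target = label.flatten()      # 1D target vector for the Dense layer
print(target.shape)           # (19316,) -> matches Dense(19316)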
As shown in the code below, I am using model.fit_generator() instead of model.fit(), and as soon as training starts I get the following error:
TypeError: float() argument must be a string or a number, not 'builtin_function_or_method'
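For context, the message itself seems to come from Python's float() being handed a function or method object instead of numeric data; this standalone snippet (unrelated to my code) raises the exact same error:

# Standalone reproduction of the same TypeError, not from my pipeline:
float(print)
# TypeError: float() argument must be a string or a number, not 'builtin_function_or_method'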
I am not really getting where this comes from in my own code, though. Here is the summary of the model:
Layer (type)                 Output Shape              Param #
=================================================================
conv2d_4 (Conv2D)            (None, 26, 877, 32)       544
activation_5 (Activation)    (None, 26, 877, 32)       0
max_pooling2d_4 (MaxPooling2 (None, 13, 438, 32)       0
conv2d_5 (Conv2D)            (None, 12, 437, 16)       2064
activation_6 (Activation)    (None, 12, 437, 16)       0
max_pooling2d_5 (MaxPooling2 (None, 6, 218, 16)        0
conv2d_6 (Conv2D)            (None, 5, 217, 8)         520
activation_7 (Activation)    (None, 5, 217, 8)         0
max_pooling2d_6 (MaxPooling2 (None, 2, 108, 8)         0
activation_8 (Activation)    (None, 2, 108, 8)         0
flatten_2 (Flatten)          (None, 1728)              0
dropout_2 (Dropout)          (None, 1728)              0
dense_2 (Dense)              (None, 19316)             33397364
=================================================================
Total params: 33,400,492
Trainable params: 33,400,492
Non-trainable params: 0
I have already looked through many online forums and similar questions, but none of them seem to match my case. Any suggestions? Thanks a lot in advance!
# imports (assuming standalone Keras on the TensorFlow 1.x backend)
import numpy as np
from numpy import newaxis
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dropout, Dense

def generator(data_arr, batch_size=10):
    num = len(data_arr)
    if num % batch_size != 0:
        num = int(num / batch_size)
    # Loop forever so the generator never terminates
    while True:
        for offset in range(0, num, batch_size):
            batch_samples = data_arr[offset:offset + batch_size]
            samples = []
            labels = []
            for batch_sample in batch_samples:
                samples.append(batch_sample[0])
                labels.append((np.array(batch_sample[1].flatten)).transpose())
            X_ = np.array(samples)
            Y_ = np.array(labels)
            X_ = X_[:, :, :, newaxis]  # add a channels dimension for Conv2D
            print(X_.shape)
            print(Y_.shape)
            yield (X_, Y_)
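To see what the generator actually yields, I pull a single batch out of it as a debugging probe (training_data is my list of (image, label) array pairs):

# Debug probe: check the shape and dtype of one batch from the generator
X_batch, Y_batch = next(generator(training_data, batch_size=10))
print(X_batch.shape, X_batch.dtype)
print(Y_batch.shape, Y_batch.dtype)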
# compile and train the model using the generator function
train_generator = generator(training_data, batch_size=10)
validation_generator = generator(val_data, batch_size=10)

run_opts = tf.RunOptions(report_tensor_allocations_upon_oom=True)

model = Sequential()
model.add(Conv2D(32, (4, 4), strides=(2, 2), input_shape=(55, 1756, 1)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(16, (2, 2)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(8, (2, 2)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation('softmax'))
model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
model.add(Dropout(0.3))
model.add(Dense(19316))

model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'],
              options=run_opts)

model.summary()

batch_size = 20
nb_epoch = 6

model.fit_generator(train_generator,
                    steps_per_epoch=len(training_data),
                    epochs=nb_epoch,
                    validation_data=validation_generator,
                    validation_steps=len(val_data))