Replies: 1 comment
I solved the mistake: I simply had to remove the "," and add a ".". Sorry for causing a misunderstanding; I'll let you know if something more serious comes up.
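For context, the one-character fix is to chain prefetch with a dot instead of splitting the line into a tuple with a comma. A minimal sketch of the corrected pipeline, reusing the names from the notebook below:

# Chain .prefetch() onto the batched dataset with a dot; a trailing comma
# would instead build a tuple and evaluate a bare, undefined `prefetch` name.
train_dataset = (train_dataset
                 .shuffle(buffer_size=8, reshuffle_each_iteration=True)
                 .batch(BATCH_SIZE)
                 .prefetch(tf.data.AUTOTUNE))
val_dataset = (val_dataset
               .shuffle(buffer_size=8, reshuffle_each_iteration=True)
               .batch(BATCH_SIZE)
               .prefetch(tf.data.AUTOTUNE))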
I am currently working with the malaria dataset, and when I try to use prefetch I get the error mentioned above.
Here is the source code:
# -*- coding: utf-8 -*-
"""malaria_detection.ipynb

Automatically generated by Colaboratory.

Original file is located at
https://colab.research.google.com/drive/1bRu3Nej0EPd9aofiB4vFsJZNgpyWAs2O

Importing Libraries
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import tensorflow_datasets as tfds
from keras.layers import Conv2D, MaxPool2D, Dense, InputLayer, Flatten
from keras import Sequential
from keras.optimizers import Adam
from keras.losses import BinaryCrossentropy
"""# Data Preparation
Data Loading
"""
dataset, dataset_info = tfds.load('malaria', with_info=True, as_supervised=True, shuffle_files=True, split=['train'])
for data in dataset[0].take(4):
    print(data)
dataset
dataset_info
def splits(dataset, TRAIN_RATIO, VAL_RATIO, TEST_RATIO):
    DATASET_SIZE = len(dataset)
    train_dataset = dataset.take(int(TRAIN_RATIO*DATASET_SIZE))
    val_test_dataset = dataset.skip(int(TRAIN_RATIO*DATASET_SIZE))
    val_dataset = val_test_dataset.take(int(VAL_RATIO*DATASET_SIZE))
    test_dataset = val_test_dataset.skip(int(VAL_RATIO*DATASET_SIZE))
    return train_dataset, val_dataset, test_dataset
TRAIN_RATIO = 0.8
VAL_RATIO = 0.1
TEST_RATIO = 0.1
#dataset = tf.data.Dataset.range(10)
train_dataset, val_dataset, test_dataset = splits(dataset[0], TRAIN_RATIO, VAL_RATIO, TEST_RATIO)
print(list(train_dataset.take(1).as_numpy_iterator()), list(val_dataset.take(1).as_numpy_iterator()), list(test_dataset.take(1).as_numpy_iterator()))
dataset
"""# Data Visualization"""
for i, (image, label) in enumerate(train_dataset.take(16)):
    ax = plt.subplot(4, 4, i + 1)
    plt.imshow(image)
    plt.title(dataset_info.features['label'].int2str(label))
    plt.axis('off')
dataset_info.features['label'].int2str(0)
"""# Data Preprocessing"""
IM_SIZE = 224
def resize_rescale(image, label):
    return tf.image.resize(image, (IM_SIZE, IM_SIZE))/255.0, label
train_dataset = train_dataset.map(resize_rescale)
val_dataset = val_dataset.map(resize_rescale)
test_dataset = test_dataset.map(resize_rescale)
train_dataset
val_dataset
test_dataset
for image, label in train_dataset.take(1):
    print(image, label)
BATCH_SIZE = 32
# The comma before prefetch on the next two lines is the typo behind the
# error: it makes each line build a tuple instead of chaining .prefetch().
train_dataset = train_dataset.shuffle(buffer_size=8, reshuffle_each_iteration=True).batch(BATCH_SIZE), prefetch(tf.data.AUTOTUNE)
val_dataset = val_dataset.shuffle(buffer_size=8, reshuffle_each_iteration=True).batch(BATCH_SIZE), prefetch(tf.data.AUTOTUNE)
train_dataset
"""# Model Creation and Training"""
lenet_model = Sequential([
InputLayer(input_shape=(IM_SIZE, IM_SIZE, 3)),
Conv2D(filters=6, kernel_size=5, strides=1, padding='valid', activation='sigmoid'),
MaxPool2D(pool_size=2, strides=2),
Conv2D(filters=16, kernel_size=5, strides=1, padding='valid', activation='sigmoid'),
MaxPool2D(pool_size=2, strides=2),
Flatten(),
Dense(100, activation="sigmoid"),
Dense(10, activation="sigmoid"),
Dense(1, activation="sigmoid"),
])
lenet_model.summary()
# Sanity check: binary cross-entropy on a few dummy labels and predictions
y_true = [0, 1, 0, 0]
y_pred = [0.6, 0.51, 0.94, 1]
bce = tf.keras.losses.BinaryCrossentropy()
bce(y_true, y_pred)
lenet_model.compile(optimizer=Adam(learning_rate=0.01), loss=BinaryCrossentropy())
history = lenet_model.fit(train_dataset, validation_data=val_dataset, epochs=100, verbose=1)
train_dataset
val_dataset
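For what it's worth, a hedged reading of the failure (the exact traceback isn't shown here): the comma makes Python parse each of the two pipeline lines as a tuple, so `prefetch(tf.data.AUTOTUNE)` is evaluated as a call to a bare name `prefetch`, which is never imported or defined in this notebook. A tiny standalone illustration:

# The comma builds a tuple, so the right-hand expression is evaluated on
# its own; with no `prefetch` in scope, that name lookup fails immediately.
nums = [1, 2, 3]
pair = nums, len(nums)    # fine: pair == ([1, 2, 3], 3)
# data = nums, prefetch(3)  # NameError: name 'prefetch' is not defined

Chaining with a dot, as described in the reply above, calls the tf.data.Dataset method instead.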