import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt

# Load dataset - assumes the data is already split into train/ and test/ directories,
# each containing one subdirectory per class (e.g. "fake" and "genuine"),
# as required by flow_from_directory
train_dir = "path_to_train_data"
test_dir = "path_to_test_data"

# Data preprocessing: augment the training images on the fly; the test images are only rescaled
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest'
)

test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),
    batch_size=32,
    class_mode='binary'  # Since it's fake vs genuine logo classification
)

test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size=(224, 224),
    batch_size=32,
    class_mode='binary'
)

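# Sanity check (added for illustration): flow_from_directory assigns labels
# alphabetically by subdirectory name, so print the mapping to see which class
# the sigmoid output will refer to (e.g. {'fake': 0, 'genuine': 1}).
print("Class indices:", train_generator.class_indices)

# Preview a few augmented training images to verify the augmentation pipeline.
images, labels = next(train_generator)
plt.figure(figsize=(8, 8))
for i in range(min(9, len(images))):
    plt.subplot(3, 3, i + 1)
    plt.imshow(images[i])
    plt.title(f"label={int(labels[i])}")
    plt.axis('off')
plt.show()
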
# Build a CNN model (simple version)
model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(224, 224, 3)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(128, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(512, activation='relu'),
    layers.Dense(1, activation='sigmoid')  # Binary classification (fake vs genuine)
])

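# Optional alternative (a sketch, not part of the original script): replace the small
# CNN above with a frozen, pretrained MobileNetV2 backbone (transfer learning), which
# usually helps when the logo dataset is small. Weights are downloaded on first use.
# Note: MobileNetV2 was trained on inputs scaled to [-1, 1]; the 1./255 rescaling above
# is an approximation that still works as a quick baseline.
USE_PRETRAINED_BACKBONE = False
if USE_PRETRAINED_BACKBONE:
    base = tf.keras.applications.MobileNetV2(
        input_shape=(224, 224, 3), include_top=False, weights='imagenet')
    base.trainable = False  # freeze the pretrained convolutional features
    model = models.Sequential([
        base,
        layers.GlobalAveragePooling2D(),
        layers.Dense(1, activation='sigmoid')
    ])

model.summary()  # print the layer-by-layer architecture and parameter counts
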
# Compile the model
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

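# Optional callbacks (a sketch, not in the original script): stop training when the
# validation loss stops improving and keep the best weights on disk. The checkpoint
# filename is illustrative; pass callbacks=callbacks to model.fit below to enable them.
callbacks = [
    tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3,
                                     restore_best_weights=True),
    tf.keras.callbacks.ModelCheckpoint('best_logo_model.keras',
                                       monitor='val_loss', save_best_only=True)
]
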
# Train the model. The step counts are inferred from the generators (one epoch covers
# the whole dataset) instead of hard-coding steps_per_epoch/validation_steps, which
# can fail when the dataset has fewer batches than the hard-coded values.
history = model.fit(train_generator,
                    epochs=10,
                    validation_data=test_generator)

# Plot training & validation accuracy
plt.plot(history.history['accuracy'], label='train accuracy')
plt.plot(history.history['val_accuracy'], label='val accuracy')
plt.xlabel('epoch')
plt.legend()
plt.show()

# Plot training & validation loss
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='val loss')
plt.xlabel('epoch')
plt.legend()
plt.show()

# Evaluate the model on the full test set
test_loss, test_acc = model.evaluate(test_generator)
print(f'Test accuracy: {test_acc:.4f}')
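
# Inference sketch (added for illustration; file names are hypothetical): save the
# trained model and score a single image. The sigmoid output is the probability of
# the class mapped to 1 in train_generator.class_indices. Older TF versions may
# need an '.h5' path instead of the '.keras' format.
import numpy as np
from tensorflow.keras.preprocessing.image import load_img, img_to_array

model.save('logo_classifier.keras')

img = load_img('path_to_single_logo.jpg', target_size=(224, 224))  # hypothetical path
x = img_to_array(img) / 255.0   # same rescaling as the training pipeline
x = np.expand_dims(x, axis=0)   # add a batch dimension: (1, 224, 224, 3)
prob = model.predict(x)[0][0]
print(f'Probability of class 1: {prob:.3f}')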