
Independent Work 3

The task is to train a neural network to recognize the first letters of initials; the approach is worked out below on the MNIST handwritten-digit dataset.




References:
https://machinelearningmastery.com/how-to-develop-a-convolutional-neural-network-from-scratch-for-mnist-handwritten-digit-classification/
https://www.youtube.com/watch?v=K3ZarjvT24I

# make a prediction for a new image
from numpy import argmax
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model


# load and prepare the image
def load_image(filename):
    # load the image as 28x28 grayscale
    img = load_img(filename, color_mode="grayscale", target_size=(28, 28))
    # convert to array
    img = img_to_array(img)
    # reshape into a single sample with 1 channel
    img = img.reshape(1, 28, 28, 1)
    # pixel scaling to [0, 1] is handled by the Rescaling layer inside the model,
    # so the raw 0-255 values are returned unchanged
    return img


# load an image and predict the class
def run_example():
    # load the image
    img = load_image('MNIST Dataset JPG format/MNIST - JPG - testing/9/7.jpg')
    # load the trained model
    model = load_model('final_model.h5')
    model.summary()
    # predict the class: the model outputs logits, argmax picks the most likely digit
    predict_value = model.predict(img)
    digit = argmax(predict_value)
    print(digit)


# entry point, run the example
run_example()
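The saved model ends in a plain Dense(10) layer, so model.predict returns raw logits rather than probabilities (the training script below compiles the loss with from_logits=True). If class probabilities are wanted in addition to the predicted digit, the logits can be passed through a softmax. A minimal sketch, assuming the same final_model.h5 and the load_image helper defined above:

import tensorflow as tf
from numpy import argmax
from tensorflow.keras.models import load_model

# sketch: convert the model's raw logits into class probabilities
model = load_model('final_model.h5')
logits = model.predict(load_image('MNIST Dataset JPG format/MNIST - JPG - testing/9/7.jpg'))
probs = tf.nn.softmax(logits[0]).numpy()  # probabilities over the 10 digit classes
print('predicted digit:', argmax(probs), 'confidence:', float(probs.max()))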

# train the CNN on the MNIST JPG dataset and save the final model to file


from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.optimizers import SGD
import tensorflow as tf
import numpy as np
import pathlib
import matplotlib.pyplot as plt

data_dir = pathlib.Path('./MNIST Dataset JPG format/MNIST - JPG - training/')

batch_size = 1
img_height = 28
img_width = 28


# load train and validation datasets from the JPG directory tree
def load_dataset():
    # training split (80%)
    trainX = tf.keras.utils.image_dataset_from_directory(
        data_dir,
        validation_split=0.2,
        subset="training",
        seed=123,
        image_size=(img_height, img_width),
        color_mode="grayscale",
        batch_size=batch_size)
    # validation split (20%)
    testX = tf.keras.utils.image_dataset_from_directory(
        data_dir,
        validation_split=0.2,
        subset="validation",
        seed=123,
        image_size=(img_height, img_width),
        color_mode="grayscale",
        batch_size=batch_size)
    # the datasets already yield batches of shape (batch_size, 28, 28, 1),
    # so no extra reshape is needed
    return trainX, testX


# load train and test dataset


def load_dataset1():
    # load dataset (built-in Keras MNIST arrays)
    (trainX, trainY), (testX, testY) = mnist.load_data()
    # reshape dataset to have a single channel
    trainX = trainX.reshape((trainX.shape[0], 28, 28, 1))
    testX = testX.reshape((testX.shape[0], 28, 28, 1))
    # one hot encode target values
    trainY = to_categorical(trainY)
    testY = to_categorical(testY)
    return trainX, trainY, testX, testY

# scale pixels in a tf.data dataset (returns only the first normalized batch of each split)
def prep_pixels1(train_ds, test):
    # convert from integers to floats by mapping a Rescaling layer over the dataset
    normalization_layer = tf.keras.layers.Rescaling(1. / 255)
    normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
    train_dsimage_batch, train_dslabels_batch = next(iter(normalized_ds))
    first_image = train_dsimage_batch[0]
    # notice the pixel values are now in [0, 1]
    print(np.min(first_image), np.max(first_image))
    normalized_dstest = test.map(lambda x, y: (normalization_layer(x), y))
    test_dsimage_batch, test_dslabels_batch = next(iter(normalized_dstest))
    # return the normalized batches
    return train_dsimage_batch, train_dslabels_batch, test_dsimage_batch, test_dslabels_batch


# scale pixels in numpy arrays (used with load_dataset1)
def prep_pixels(train, test):
    # convert from integers to floats
    train_norm = train.astype('float32')
    test_norm = test.astype('float32')
    # normalize to range 0-1
    train_norm = train_norm / 255.0
    test_norm = test_norm / 255.0
    # return normalized images
    return train_norm, test_norm
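Neither scaling helper is called by run_test_harness further down, which relies on the Rescaling layer inside the model instead. A minimal usage sketch of the array-based path, assuming load_dataset1 and prep_pixels exactly as defined above:

# sketch: the array-based pipeline with explicit pixel scaling (not used below)
trainX, trainY, testX, testY = load_dataset1()
trainX, testX = prep_pixels(trainX, testX)
print(trainX.shape, trainX.min(), trainX.max())  # expected: (60000, 28, 28, 1) 0.0 1.0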

# define cnn model
def define_model():
    model = Sequential()
    # scale raw 0-255 pixels to [0, 1] inside the model
    model.add(tf.keras.layers.Rescaling(1. / 255, input_shape=(28, 28, 1)))
    model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform'))
    model.add(MaxPooling2D((2, 2)))
    model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform'))
    model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform'))
    model.add(MaxPooling2D((2, 2)))
    model.add(Flatten())
    model.add(Dense(100, activation='relu', kernel_initializer='he_uniform'))
    # output layer emits raw logits; softmax is applied inside the loss below
    model.add(Dense(10))
    # compile model: image_dataset_from_directory yields integer labels, so a sparse loss is used
    # (the tutorial's alternative head - softmax output, SGD, categorical_crossentropy - is sketched below)
    model.compile(optimizer='adam',
                  loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    return model
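For comparison, the MNIST tutorial linked at the top builds the head differently: a softmax output layer trained on one-hot labels with SGD and categorical_crossentropy, which matches load_dataset1 and prep_pixels. A hedged sketch of that variant under the hypothetical name define_model_onehot (it reuses the imports at the top of this script and expects inputs already scaled to [0, 1], since it has no Rescaling layer):

# sketch: tutorial-style variant - softmax output, one-hot labels, SGD + categorical_crossentropy
def define_model_onehot():
    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform',
                     input_shape=(28, 28, 1)))
    model.add(MaxPooling2D((2, 2)))
    model.add(Flatten())
    model.add(Dense(100, activation='relu', kernel_initializer='he_uniform'))
    model.add(Dense(10, activation='softmax'))
    opt = SGD(learning_rate=0.01, momentum=0.9)
    model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
    return model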

# exploratory test harness: inspect the dataset and plot a few samples
# (defined for exploration; not called by the entry point below)
def run_test_harness1():
    # load dataset
    train_ds, testX = load_dataset()
    # class names are taken from the sub-directory names (the digits 0-9)
    class_names = train_ds.class_names
    print(class_names)
    # plot the first nine images; batch_size is 1, so take nine batches
    for i, (image, labels) in enumerate(train_ds.take(9)):
        # define subplot
        plt.subplot(3, 3, 1 + i)
        # plot raw pixel data in grayscale
        plt.imshow(np.reshape(image[0], [28, 28]), cmap=plt.get_cmap('gray'))
        plt.title(class_names[int(labels[0])])
    # show the figure
    plt.show()

    # inspect the shape of one batch
    for image_batch, labels_batch in train_ds:
        print(image_batch.shape)
        print(labels_batch.shape)
        print(labels_batch)
        break

    # manually rescale one batch to confirm the pixel values end up in [0, 1]
    normalization_layer = tf.keras.layers.Rescaling(1. / 255)
    normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
    image_batch, labels_batch = next(iter(normalized_ds))
    first_image = image_batch[0]
    print(np.min(first_image), np.mean(first_image), np.max(first_image))
    # model definition, training and saving are done in run_test_harness below


# run the test harness: build, train and save the final model
def run_test_harness():
    # load dataset (pixel scaling is handled by the Rescaling layer in the model)
    trainX, testX = load_dataset()
    # define model
    model = define_model()
    # cache and prefetch for a faster input pipeline
    AUTOTUNE = tf.data.AUTOTUNE
    trainX = trainX.cache().prefetch(buffer_size=AUTOTUNE)
    testX = testX.cache().prefetch(buffer_size=AUTOTUNE)
    # fit model
    model.fit(trainX, validation_data=testX, epochs=3)
    # save model
    model.save('final_model.h5')


# entry point, run the test harness
run_test_harness()
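After training, it is worth checking accuracy on the held-out validation split before trusting the saved model for predictions. A minimal sketch, assuming the load_dataset helper above and the saved final_model.h5:

# sketch: evaluate the saved model on the validation split
from tensorflow.keras.models import load_model

_, testX = load_dataset()
model = load_model('final_model.h5')
loss, acc = model.evaluate(testX, verbose=0)
print('validation accuracy: %.3f' % acc)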