Python: 3.6.10
Keras: 2.2.4
TensorFlow: 1.14.0
numpy: 1.16.4
sklearn: 0.22.2
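To confirm that the installed versions match the ones above, a quick check like the following can be run (this is just a convenience snippet, not part of the scripts below):

# Environment check: print the installed versions of the libraries used in this article
import sys
import keras
import tensorflow as tf
import numpy as np
import sklearn

print("Python:", sys.version.split()[0])
print("Keras:", keras.__version__)
print("TensorFlow:", tf.__version__)
print("numpy:", np.__version__)
print("sklearn:", sklearn.__version__)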
create_datasets.py
from sklearn import model_selection
from PIL import Image
import os, glob
import numpy as np
import random

# Categories to classify
name = "name"
root_dir = "./datasets/" + name + "/"
savenpy = "./npy/" + name + ".npy"
categories = os.listdir(root_dir)
nb_classes = len(categories)
image_size = 224

# Read the image data from each folder
X = []  # image data
Y = []  # label data
for idx, category in enumerate(categories):
    dir_path = root_dir + category
    search_files = os.listdir(dir_path)
    for file in search_files:
        filepath = dir_path + "/" + file
        img = Image.open(filepath)
        img = img.convert("RGB")
        #img = img.convert("L")
        img = img.resize((image_size, image_size))
        data = np.asarray(img)
        X.append(data)
        Y.append(idx)
X = np.array(X)
Y = np.array(Y)
print(len(X), len(Y))

# Split into training data and test data
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, Y, test_size=0.2)
xy = (X_train, X_test, y_train, y_test)

# Save the NumPy arrays to a file
np.save(savenpy, xy)
print("end")
create_model.py
import os
import keras
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, Conv2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.utils import np_utils
import numpy as np
from keras.callbacks import EarlyStopping

# Categories to classify
name = "name"
root_dir = "./datasets/" + name + "/"
loadnpy = "./npy/" + name + ".npy"
categories = os.listdir(root_dir)
nb_classes = len(categories)

# Define the main function
def main():
    X_train, X_test, y_train, y_test = np.load(loadnpy, allow_pickle=True)
    # Normalize the image data to [0, 1] (the prediction script below also divides by 255)
    X_train = X_train.astype('float') / 255
    X_test = X_test.astype('float') / 255
    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_test = np_utils.to_categorical(y_test, nb_classes)
    model = model_train(X_train, y_train)
    model_eval(model, X_test, y_test)

# Train the model
def model_train(X, y):
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding='same', input_shape=X.shape[1:]))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    model.summary()

    # Optimizer
    opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)

    # Compile the model
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt, metrics=['accuracy'])

    # Train the model
    model.fit(X, y, batch_size=32, epochs=100)

    # Save the model
    model.save('./save_model.h5')
    return model

# Evaluate the model
def model_eval(model, X, y):
    score = model.evaluate(X, y)
    print('loss=', score[0])
    print('accuracy=', score[1])

if __name__ == "__main__":
    main()
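Note that EarlyStopping is imported but never used. If you want training to stop once the validation loss stops improving, the fit call in model_train could be extended along these lines (the validation_split and patience values here are only example choices):

# Example: stop training early when the validation loss stops improving
early_stopping = EarlyStopping(monitor='val_loss', patience=5, verbose=1)
model.fit(X, y,
          batch_size=32,
          epochs=100,
          validation_split=0.1,        # hold out 10% of the training data for validation
          callbacks=[early_stopping])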
Prediction script

import os
import numpy as np
from keras.models import load_model
from keras.preprocessing.image import img_to_array, load_img

img_path = 'path to the image file you want to classify (jpg/png)'
model_file_path = 'path to the model file (h5)'
name = "name"
root_dir = "./datasets/" + name + "/"  # path to the folder whose subfolder names are the class labels
categories = os.listdir(root_dir)      # get the labels from the folder names

model = load_model(model_file_path)

# Load the image, resize it, and scale it to the same range used during training
img = load_img(img_path, target_size=(224, 224))
img_nad = img_to_array(img) / 255
img_nad = img_nad[None, ...]  # add a batch dimension

pred = model.predict(img_nad, batch_size=1, verbose=0)
score = np.max(pred)
pred_label = categories[np.argmax(pred[0])]
print('name:', pred_label)
print('score:', score)
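To see the score for every category rather than only the best one, the prediction vector can be paired with the folder names (using the variables from the script above):

# Print the predicted probability for every category
for label, prob in zip(categories, pred[0]):
    print(label, round(float(prob), 4))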