Tensor Tensor("predictions/Softmax:0", shape=(?, 4), dtype=float32) is not an element of this graph

ValueError: Tensor Tensor("predictions/Softmax:0", shape=(?, 4), dtype=float32) is not an element of this graph

Original issue and solution
https://github.com/keras-team/keras/issues/2397#issuecomment-254919212

Problem description:
A neural network was trained with Keras on the TensorFlow backend, producing an .h5 weights file.
A separate Python script then loads the weights, reads an image, and runs prediction.
When that Python code is invoked from a child thread of a multi-threaded C# program, the following error is raised:
"ValueError: Tensor Tensor("predictions/Softmax:0", shape=(?, 4), dtype=float32) is not an element of this graph"

Solution:
The main change is to add one line right after the weights are loaded, keeping a reference to the graph that actually contains the model's ops (the default graph that TensorFlow resolves inside the C# host's worker thread is not necessarily that graph, which is exactly what the error message is complaining about):

model = load_model()
graph = tf.get_default_graph()

Then, wherever a prediction is made, wrap the call in with graph.as_default(): so the captured graph is active while model.predict() runs.
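
To see the pattern in isolation, here is a minimal, self-contained sketch under the same Keras + TensorFlow 1.x assumptions; the weights file name, the input shape, and the Python worker thread are placeholders standing in for the C# host's call. The model and its graph are created in the main thread, and the captured graph is re-activated inside the thread that actually runs the prediction.

# minimal sketch of the fix, assuming Keras with the TensorFlow 1.x backend
import threading
import numpy as np
import tensorflow as tf
from keras.models import load_model

model = load_model('my_model.h5')   # hypothetical weights file, loaded in the main thread
graph = tf.get_default_graph()      # the graph that actually contains the model's ops

def predict_from_worker(batch):
    # without as_default() the worker thread may resolve a different default graph,
    # which triggers "... is not an element of this graph"
    with graph.as_default():
        return model.predict(batch)

worker = threading.Thread(
    target=lambda: print(predict_from_worker(np.zeros((1, 224, 224, 3), np.float32))))
worker.start()
worker.join()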

Original .py script

#-*- coding:utf-8 -*-
from keras.applications.vgg16 import preprocess_input,VGG16
from keras.layers import Dense
from keras.models import Model
import numpy as np
from PIL import Image
from keras.optimizers import SGD
import time
import cv2
from math import *
from scipy.stats import mode
import tensorflow as tf
from keras import backend as K
import os


def get_session(gpu_fraction=1.0):
    num_threads = os.environ.get('OMP_NUM_THREADS')
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)

    if num_threads:
        return tf.Session(config=tf.ConfigProto(
            gpu_options=gpu_options, intra_op_parallelism_threads=num_threads))
    else:
        return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

def load_model():
    sgd = SGD(lr=0.00001, momentum=0.9)
    model = VGG16(weights=None, classes=4)
    # load the model weights
    model.load_weights('./models/modelAngle.h5', by_name=True)
    # compile the model; fine-tune with a small learning rate
    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
    return model

# load the model
K.set_session(get_session())
model = load_model()

def predict(im):
    """
    Predict the orientation (rotation angle) of the text in an image.
    """
    ROTATE = [0, 270, 180, 90]
    # im = cv2.imread(img)
    h, w, _ = im.shape  # OpenCV images are (height, width, channels)
    thresh = 0.05
    xmin, ymin, xmax, ymax = int(thresh * w), int(thresh * h), w - int(thresh * w), h - int(thresh * h)
    # im = im.crop((xmin, ymin, xmax, ymax))
    im = im[ymin:ymax, xmin:xmax]  # crop the image borders to remove edge noise
    # im = im.resize((224, 224))
    im = cv2.resize(im, (224, 224))
    img = np.array(im)
    img = preprocess_input(img.astype(np.float32))
    pred = model.predict(np.array([img]))
    index = np.argmax(pred, axis=1)[0]
    return ROTATE[index]

def rotate(image, angle, center=None, scale=1.0):
    filled_color = -1
    if filled_color == -1:
        filled_color = mode([image[0, 0], image[0, -1],
                             image[-1, 0], image[-1, -1]]).mode[0]
    if np.array(filled_color).shape[0] == 2:
        if isinstance(filled_color, int):
            filled_color = (filled_color, filled_color, filled_color)
    else:
        filled_color = tuple([int(i) for i in filled_color])

    height, width = image.shape[:2]
    heightNew = int(width * fabs(sin(radians(angle))) + height * fabs(cos(radians(angle))))  # bounding box of the rotated image
    widthNew = int(height * fabs(sin(radians(angle))) + width * fabs(cos(radians(angle))))

    matRotation = cv2.getRotationMatrix2D((width / 2, height / 2), angle, scale)  # rotate counter-clockwise by angle degrees about the centre

    matRotation[0, 2] += (widthNew - width) / 2  # shift so the rotated image stays centred in the new, larger canvas
    matRotation[1, 2] += (heightNew - height) / 2

    imgRotation = cv2.warpAffine(image, matRotation, (widthNew, heightNew), borderValue=filled_color)

    return imgRotation

if __name__ == "__main__":
    t = time.time()
    img = cv2.imread("st2.png")
    degree = predict(img.copy())
    transform_img = rotate(img, degree)
    cv2.imwrite("st2a.png", transform_img)
    # print("旋转角度:"+str(predict("37.jpg"))+"°")
    # print(type(predict("37.jpg")))
    print("旋转检测时间:{:.2f}秒".format(time.time() - t))

Modified code. Compared with the original script, the functional changes are the graph = tf.get_default_graph() line after load_model() and the with graph.as_default(): block inside predict(); the GPU memory fraction passed to get_session() was also lowered from 1.0 to 0.5.

#-*- coding:utf-8 -*-
from keras.applications.vgg16 import preprocess_input,VGG16
from keras.layers import Dense
from keras.models import Model
import numpy as np
from PIL import Image
from keras.optimizers import SGD
import time
import cv2
from math import *
from scipy.stats import mode
import tensorflow as tf
from keras import backend as K
import os


def get_session(gpu_fraction=1.0):
    num_threads = os.environ.get('OMP_NUM_THREADS')
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)

    if num_threads:
        return tf.Session(config=tf.ConfigProto(
            gpu_options=gpu_options, intra_op_parallelism_threads=num_threads))
    else:
        return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))


def load_model():
    sgd = SGD(lr=0.00001, momentum=0.9)
    model = VGG16(weights=None, classes=4)
    # load the model weights
    model.load_weights('./models/modelAngle.h5', by_name=True)
    # compile the model; fine-tune with a small learning rate
    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
    return model

# load the model
K.set_session(get_session(0.5))
model = load_model()
graph = tf.get_default_graph()

def predict(im):
    """
    Predict the orientation (rotation angle) of the text in an image.
    """
    with graph.as_default():
        ROTATE = [0, 270, 180, 90]
        # im = cv2.imread(img)
        h, w, _ = im.shape  # OpenCV images are (height, width, channels)
        thresh = 0.05
        xmin, ymin, xmax, ymax = int(thresh * w), int(thresh * h), w - int(thresh * w), h - int(thresh * h)
        # im = im.crop((xmin, ymin, xmax, ymax))
        im = im[ymin:ymax, xmin:xmax]  # crop the image borders to remove edge noise
        # im = im.resize((224, 224))
        im = cv2.resize(im, (224, 224))
        img = np.array(im)
        img = preprocess_input(img.astype(np.float32))
        pred = model.predict(np.array([img]))
        index = np.argmax(pred, axis=1)[0]
    return ROTATE[index]

def rotate(image, angle, center=None, scale=1.0):
    filled_color = -1
    if filled_color == -1:
        filled_color = mode([image[0, 0], image[0, -1],
                             image[-1, 0], image[-1, -1]]).mode[0]
    if np.array(filled_color).shape[0] == 2:
        if isinstance(filled_color, int):
            filled_color = (filled_color, filled_color, filled_color)
    else:
        filled_color = tuple([int(i) for i in filled_color])

    height, width = image.shape[:2]
    heightNew = int(width * fabs(sin(radians(angle))) + height * fabs(cos(radians(angle))))  # bounding box of the rotated image
    widthNew = int(height * fabs(sin(radians(angle))) + width * fabs(cos(radians(angle))))

    matRotation = cv2.getRotationMatrix2D((width / 2, height / 2), angle, scale)  # rotate counter-clockwise by angle degrees about the centre

    matRotation[0, 2] += (widthNew - width) / 2  # shift so the rotated image stays centred in the new, larger canvas
    matRotation[1, 2] += (heightNew - height) / 2

    imgRotation = cv2.warpAffine(image, matRotation, (widthNew, heightNew), borderValue=filled_color)

    return imgRotation

if __name__ == "__main__":
    t = time.time()
    img = cv2.imread("st2.png")
    degree = predict(img.copy())
    transform_img = rotate(img, degree)
    cv2.imwrite("st2a.png", transform_img)
    # print("旋转角度:"+str(predict("37.jpg"))+"°")
    # print(type(predict("37.jpg")))
    print("旋转检测时间:{:.2f}秒".format(time.time() - t))

After these changes the error no longer occurs.

Perfect.
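
A related workaround that is often suggested for this error (including in the discussion around the same GitHub issue) is to force Keras to build its prediction function in the thread that loads the model. Note that _make_predict_function() is a private Keras method, so this is version-dependent and should be treated as a fallback rather than a stable API:

model = load_model()
model._make_predict_function()  # private Keras method: builds predict() now, in the loading thread's graph
graph = tf.get_default_graph()

Also note that on TensorFlow 2.x the 1.x-style APIs used above (tf.get_default_graph, tf.Session, tf.ConfigProto, tf.GPUOptions) live under tf.compat.v1.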


Reposted from blog.csdn.net/zx_good_night/article/details/95310172