python - Tensorflow error: InvalidArgumentError: Different number of component types.


I want to input batches of shuffled images for training, and I wrote the code according to the generic image input in TensorVision, but I get an error and cannot figure out what is wrong. Here is my code:

import os
import tensorflow as tf


def read_labeled_image_list(image_list_file):
    """
    Reads a .txt file containing paths and labels.

    Parameters
    ----------
    image_list_file : a .txt file with one /path/to/image per line
    label : optionally, if set, the label is pasted after each line

    Returns
    -------
    List of filenames in the file image_list_file
    """
    f = open(image_list_file, 'r')
    filenames = []
    labels = []
    for line in f:
        filename, label = line[:-1].split(' ')
        filenames.append(filename)
        labels.append(int(label))
    return filenames, labels


def read_images_from_disk(input_queue):
    """Consumes a single filename and label as a ' '-delimited string.

    Parameters
    ----------
    filename_and_label_tensor: a scalar string tensor.

    Returns
    -------
    Two tensors: the decoded image, and the string label.
    """
    label = input_queue[1]
    file_contents = tf.read_file(input_queue[0])
    example = tf.image.decode_png(file_contents, channels=3)
#    example = rescale_image(example)
    # processed_label = label
    return example, label


def random_resize(image, lower_size, upper_size):
    """Randomly resizes an image.

    Parameters
    ----------
    lower_size:
    upper_size:

    Returns
    -------
    A randomly resized image
    """
    new_size = tf.to_int32(
        tf.random_uniform([], lower_size, upper_size))

    return tf.image.resize_images(image, new_size, new_size,
                                  method=0)


def _input_pipeline(filename, batch_size,
                    processing_image=lambda x: x,
                    processing_label=lambda y: y,
                    num_epochs=None):
    """The input pipeline for reading image classification data.

    The data should be stored in a single text file using the format:

     /path/to/image_0 label_0
     /path/to/image_1 label_1
     /path/to/image_2 label_2
     ...

    Args:
       filename: the path to the txt file
       batch_size: size of the batches produced
       num_epochs: optionally limits the number of epochs

    Returns:
       List of filenames in the file image_list_file
    """
    # Reads the paths of the images together with their labels
    image_list, label_list = read_labeled_image_list(filename)

    images = tf.convert_to_tensor(image_list, dtype=tf.string)
    labels = tf.convert_to_tensor(label_list, dtype=tf.int32)

    # Makes an input queue
    input_queue = tf.train.slice_input_producer([images, labels],
                                                num_epochs=num_epochs,
                                                shuffle=True)

    # Reads the actual images from disk
    image, label = read_images_from_disk(input_queue)
    pr_image = processing_image(image)
    pr_label = processing_label(label)

    image_batch, label_batch = tf.train.batch([pr_image, pr_label],
                                              batch_size=batch_size,
                                              shapes=[256, 256, 3])

    # Display the training images in the visualizer.
    tensor_name = image.op.name
    tf.image_summary(tensor_name + 'images', image_batch)
    return image_batch, label_batch


def test_pipeline():
    data_folder = '/home/kang/documents/work_code_pc1/data/uclandusedimages/'
    data_file = 'ucimage_labels.txt'

    filename = os.path.join(data_folder, data_file)

    image_batch, label_batch = _input_pipeline(filename, 75)

    # Create the graph, etc.
    init_op = tf.initialize_all_variables()
    sess = tf.InteractiveSession()
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    a = sess.run([image_batch, label_batch])

    coord.request_stop()
    coord.join(threads)
    print("finish test")
    return


if __name__ == '__main__':
#    aa = test_preprocc()
#    matplotlib.pyplot.imshow(aa[1])
    a1 = test_pipeline()
    a2 = test_pipeline()

But it throws an error that has confused me for a long time:

Traceback (most recent call last):
  File "<ipython-input-7-e24901ce3365>", line 1, in <module>
    runfile('/home/kang/documents/work_code_pc1/vgg_tensorflow_ucmerced/readuclandusedimagetxt1.py', wdir='/home/kang/documents/work_code_pc1/vgg_tensorflow_ucmerced')
  File "/usr/local/lib/python2.7/dist-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 714, in runfile
    execfile(filename, namespace)
  File "/usr/local/lib/python2.7/dist-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 81, in execfile
    builtins.execfile(filename, *where)
  File "/home/kang/documents/work_code_pc1/vgg_tensorflow_ucmerced/readuclandusedimagetxt1.py", line 254, in <module>
    a1 = test_pipeline()
  File "/home/kang/documents/work_code_pc1/vgg_tensorflow_ucmerced/readuclandusedimagetxt1.py", line 244, in test_pipeline
    a = sess.run([image_batch, label_batch])
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 340, in run
    run_metadata_ptr)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 564, in _run
    feed_dict_string, options, run_metadata)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 637, in _do_run
    target_list, options, run_metadata)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 659, in _do_call
    e.code)
InvalidArgumentError: Different number of component types.  Types: uint8, int32, Shapes: [[256,256,3]]
     [[Node: batch_11/fifo_queue = FIFOQueue[capacity=32, component_types=[DT_UINT8, DT_INT32], container="", shapes=[[256,256,3]], shared_name="", _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
Caused by op u'batch_11/fifo_queue', defined at:

The error is due to a wrong shapes argument for the function tf.train.batch. The shapes argument should either be left at its default, or match the documentation:

shapes: (Optional) The shapes for each example. Defaults to the inferred shapes for tensor_list.

Here you are giving shapes = [256, 256, 3], but you should give the shapes of both pr_image and pr_label in a list:

image_batch, label_batch = tf.train.batch(
    [pr_image, pr_label],
    batch_size=batch_size,
    shapes=[[256, 256, 3], pr_label.get_shape()])
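Alternatively, the shapes argument can be left at its default so that tf.train.batch infers the shapes from tensor_list; this only works if every tensor in the list has a fully defined static shape. A minimal sketch of that approach, assuming the images are resized to a fixed 256x256 size (the resize_to_256 helper below is hypothetical and not part of the original code):

def resize_to_256(image):
    # Hypothetical processing_image function: give every image the same
    # fixed size so its static shape is fully defined.
    image = tf.image.resize_images(image, 256, 256, method=0)
    # Safety net in case the resize does not pin down the static shape.
    image.set_shape([256, 256, 3])
    return image

# With fully defined static shapes, the shapes argument can be omitted;
# tf.train.batch then infers [256, 256, 3] for the image and [] for the label.
image_batch, label_batch = tf.train.batch([pr_image, pr_label],
                                          batch_size=batch_size)

With this sketch, the pipeline would be built as _input_pipeline(filename, 75, processing_image=resize_to_256), so pr_image already carries the shape that the batching queue needs.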
