InvalidArgumentError: Graph execution error

Greetings everyone, I'm trying to build a NN for a multi-label text classification problem. When fitting the model, I got the "InvalidArgumentError: Graph execution" error, I'm su...

I got the error shown below:

InvalidArgumentError                      Traceback (most recent call last)
~\AppData\Local\Temp\ipykernel_72122888220523.py in <module>
      1 # print model summary
      2
----> 3 history = model.fit(x=X_train, validation_data=X_test,
      4     epochs= epoch,
      5     steps_per_epoch=X_train.samples//batch_size,

~\AppData\Roaming\Python\Python39\site-packages\keras\utils\traceback_utils.py in error_handler(*args, **kwargs)
     68       # To get the full stack trace, call:
     69       # tf.debugging.disable_traceback_filtering()
---> 70       raise e.with_traceback(filtered_tb) from None
     71     finally:
     72       del filtered_tb

~\AppData\Roaming\Python\Python39\site-packages\tensorflow\python\eager\execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
     50   try:
     51     ctx.ensure_initialized()
---> 52     tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
     53                                         inputs, attrs, num_outputs)
     54   except core._NotOkStatusException as e:
InvalidArgumentError: Graph execution error:

Detected at node 'categorical_crossentropy/softmax_cross_entropy_with_logits' defined at (most recent call last):
  File "C:\ProgramData\Anaconda3\lib\runpy.py", line 197, in _run_module_as_main
    return _run_code(code, main_globals, None,
  File "C:\ProgramData\Anaconda3\lib\runpy.py", line 87, in _run_code
    exec(code, run_globals)
  File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel_launcher.py", line 17, in <module>
    app.launch_new_instance()
  File "C:\ProgramData\Anaconda3\lib\site-packages\traitlets\config\application.py", line 846, in launch_instance
    app.start()
  File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelapp.py", line 712, in start
    self.io_loop.start()
  File "C:\ProgramData\Anaconda3\lib\site-packages\tornado\platform\asyncio.py", line 199, in start
    self.asyncio_loop.run_forever()
  File "C:\ProgramData\Anaconda3\lib\asyncio\base_events.py", line 601, in run_forever
    self._run_once()
  File "C:\ProgramData\Anaconda3\lib\asyncio\base_events.py", line 1905, in _run_once
    handle._run()
  File "C:\ProgramData\Anaconda3\lib\asyncio\events.py", line 80, in _run
    self._context.run(self._callback, *self._args)
  File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 510, in dispatch_queue
    await self.process_one()
  File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 499, in process_one
    await dispatch(*args)
  File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 406, in dispatch_shell
    await result
  File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 730, in execute_request
    reply_content = await reply_content
  File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\ipkernel.py", line 390, in do_execute
    res = shell.run_cell(code, store_history=store_history, silent=silent)
  File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\zmqshell.py", line 528, in run_cell
    return super().run_cell(*args, **kwargs)
  File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2914, in run_cell
    result = self._run_cell(
  File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2960, in _run_cell
    return runner(coro)
  File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\async_helpers.py", line 78, in pseudo_sync_runner
    coro.send(None)
  File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3185, in run_cell_async
    has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
  File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3377, in run_ast_nodes
    if (await self.run_code(code, result, async_=asy)):
  File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3457, in run_code
    exec(code_obj, self.user_global_ns, self.user_ns)
  File "C:\Users\THADIBINDUMADHAVI\AppData\Local\Temp\ipykernel_72122888220523.py", line 3, in <module>
    history = model.fit(x=X_train, validation_data=X_test,
  File "C:\Users\THADIBINDUMADHAVI\AppData\Roaming\Python\Python39\site-packages\keras\utils\traceback_utils.py", line 65, in error_handler
    return fn(*args, **kwargs)
  File "C:\Users\THADIBINDUMADHAVI\AppData\Roaming\Python\Python39\site-packages\keras\engine\training.py", line 1650, in fit
    tmp_logs = self.train_function(iterator)
  File "C:\Users\THADIBINDUMADHAVI\AppData\Roaming\Python\Python39\site-packages\keras\engine\training.py", line 1249, in train_function
    return step_function(self, iterator)
  File "C:\Users\THADIBINDUMADHAVI\AppData\Roaming\Python\Python39\site-packages\keras\engine\training.py", line 1233, in step_function
    outputs = model.distribute_strategy.run(run_step, args=(data,))
  File "C:\Users\THADIBINDUMADHAVI\AppData\Roaming\Python\Python39\site-packages\keras\engine\training.py", line 1222, in run_step
    outputs = model.train_step(data)
  File "C:\Users\THADIBINDUMADHAVI\AppData\Roaming\Python\Python39\site-packages\keras\engine\training.py", line 1024, in train_step
    loss = self.compute_loss(x, y, y_pred, sample_weight)
  File "C:\Users\THADIBINDUMADHAVI\AppData\Roaming\Python\Python39\site-packages\keras\engine\training.py", line 1082, in compute_loss
    return self.compiled_loss(
  File "C:\Users\THADIBINDUMADHAVI\AppData\Roaming\Python\Python39\site-packages\keras\engine\compile_utils.py", line 265, in __call__
    loss_value = loss_obj(y_t, y_p, sample_weight=sw)
  File "C:\Users\THADIBINDUMADHAVI\AppData\Roaming\Python\Python39\site-packages\keras\losses.py", line 152, in __call__
    losses = call_fn(y_true, y_pred)
  File "C:\Users\THADIBINDUMADHAVI\AppData\Roaming\Python\Python39\site-packages\keras\losses.py", line 284, in call
    return ag_fn(y_true, y_pred, **self._fn_kwargs)
  File "C:\Users\THADIBINDUMADHAVI\AppData\Roaming\Python\Python39\site-packages\keras\losses.py", line 2004, in categorical_crossentropy
    return backend.categorical_crossentropy(
  File "C:\Users\THADIBINDUMADHAVI\AppData\Roaming\Python\Python39\site-packages\keras\backend.py", line 5538, in categorical_crossentropy
    return tf.nn.softmax_cross_entropy_with_logits(
Node: 'categorical_crossentropy/softmax_cross_entropy_with_logits'
logits and labels must be broadcastable: logits_size=[32,6] labels_size=[32,10]
     [[{{node categorical_crossentropy/softmax_cross_entropy_with_logits}}]] [Op:__inference_train_function_4067]
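
The last line is the actual problem: the model emits logits of shape [32, 6] while the labels arrive as [32, 10], i.e. the final layer has 6 units but the targets have 10 columns. A minimal self-contained sketch of the usual fix, with toy data standing in for the asker's (which we don't have):

import numpy as np
import tensorflow as tf

# Toy stand-ins: 32 samples, 6 features, multi-label targets with 10
# columns (matching labels_size=[32,10] from the error message).
X_train = np.random.rand(32, 6).astype("float32")
y_train = np.random.randint(0, 2, size=(32, 10)).astype("float32")

model = tf.keras.Sequential([
    tf.keras.layers.Dense(64, activation="relu", input_shape=(6,)),
    # Size the head from the labels; a hard-coded Dense(6) here would
    # reproduce "logits_size=[32,6] labels_size=[32,10]".
    tf.keras.layers.Dense(y_train.shape[1], activation="sigmoid"),
])
# For multi-label targets, sigmoid + binary_crossentropy is the usual
# pairing; categorical_crossentropy assumes mutually exclusive one-hot rows.
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(X_train, y_train, epochs=1, verbose=0)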

I'm having multiple errors while running this VGG training code (code and errors shown below). I don't know if it's because of my dataset or something else.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics.pairwise import cosine_similarity
import os
import scipy

train_directory = 'sign_data/train' #To be changed
test_directory = 'sign_data/test' #To be changed

train_datagen = ImageDataGenerator(
    rescale = 1./255,
    rotation_range = 0.1,
    width_shift_range = 0.2,
    height_shift_range = 0.2,
    shear_range = 0.1
)

train_generator = train_datagen.flow_from_directory(
    train_directory,
    target_size = (224, 224),
    color_mode = 'rgb',
    shuffle = True,
    batch_size=32
    
)


test_datagen = ImageDataGenerator(
    rescale = 1./255,
)

test_generator = test_datagen.flow_from_directory(
    test_directory,
    target_size = (224, 224),
    color_mode = 'rgb',
    shuffle = True,
    batch_size=32
)

from tensorflow.keras.applications.vgg16 import VGG16   
vgg_basemodel = VGG16(include_top=True)

from tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping

early_stopping = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=5)

vgg_model = tf.keras.Sequential(vgg_basemodel.layers[:-1])
vgg_model.add(tf.keras.layers.Dense(10, activation = 'softmax'))

# Freezing original layers
for layer in vgg_model.layers[:-1]:
    layer.trainable = False

vgg_model.compile(loss='categorical_crossentropy',
                  optimizer=tf.keras.optimizers.SGD(momentum=0.9, learning_rate=0.001, decay=0.01),
                  metrics=['accuracy'])

history = vgg_model.fit(train_generator,
              epochs=30,
              batch_size=64,
              validation_data=test_generator,
              callbacks=[early_stopping])

# finetuning with all layers set trainable

for layer in vgg_model.layers:
    layer.trainable = True

vgg_model.compile(loss='categorical_crossentropy',
                  optimizer=tf.keras.optimizers.SGD(momentum=0.9, lr=0.0001),
                  metrics=['accuracy'])

history2 = vgg_model.fit(train_generator,
              epochs=5,
              batch_size=64,
              validation_data=test_generator,
              callbacks=[early_stopping])

vgg_model.save('saved_models/vgg_finetuned_model')

First error: InvalidArgumentError

InvalidArgumentError                      Traceback (most recent call last)
<ipython-input-13-292bf57ef59f> in <module>()
     14               batch_size=64,
     15               validation_data=test_generator,
---> 16               callbacks=[early_stopping])
     17 
     18 # finetuning with all layers set trainable

/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py in error_handler(*args, **kwargs)
     65     except Exception as e:  # pylint: disable=broad-except
     66       filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67       raise e.with_traceback(filtered_tb) from None
     68     finally:
     69       del filtered_tb

/usr/local/lib/python3.7/dist-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
     53     ctx.ensure_initialized()
     54     tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 55                                         inputs, attrs, num_outputs)
     56   except core._NotOkStatusException as e:
     57     if name is not None:

Second Error: Graph Execution Error

InvalidArgumentError: Graph execution error:
Detected at node 'categorical_crossentropy/softmax_cross_entropy_with_logits' defined at (most recent call last):
    File "/usr/lib/python3.7/runpy.py", line 193, in _run_module_as_main
      "__main__", mod_spec)
    File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
      exec(code, run_globals)
    File "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py", line 16, in <module>
      app.launch_new_instance()
    File "/usr/local/lib/python3.7/dist-packages/traitlets/config/application.py", line 846, in launch_instance
      app.start()
    File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelapp.py", line 499, in start
      self.io_loop.start()
    File "/usr/local/lib/python3.7/dist-packages/tornado/platform/asyncio.py", line 132, in start
      self.asyncio_loop.run_forever()
    File "/usr/lib/python3.7/asyncio/base_events.py", line 541, in run_forever
      self._run_once()
    File "/usr/lib/python3.7/asyncio/base_events.py", line 1786, in _run_once
      handle._run()
    File "/usr/lib/python3.7/asyncio/events.py", line 88, in _run
      self._context.run(self._callback, *self._args)
    File "/usr/local/lib/python3.7/dist-packages/tornado/platform/asyncio.py", line 122, in _handle_events
      handler_func(fileobj, events)
    File "/usr/local/lib/python3.7/dist-packages/tornado/stack_context.py", line 300, in null_wrapper
      return fn(*args, **kwargs)
    File "/usr/local/lib/python3.7/dist-packages/zmq/eventloop/zmqstream.py", line 452, in _handle_events
      self._handle_recv()
    File "/usr/local/lib/python3.7/dist-packages/zmq/eventloop/zmqstream.py", line 481, in _handle_recv
      self._run_callback(callback, msg)
    File "/usr/local/lib/python3.7/dist-packages/zmq/eventloop/zmqstream.py", line 431, in _run_callback
      callback(*args, **kwargs)
    File "/usr/local/lib/python3.7/dist-packages/tornado/stack_context.py", line 300, in null_wrapper
      return fn(*args, **kwargs)
    File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 283, in dispatcher
      return self.dispatch_shell(stream, msg)
    File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 233, in dispatch_shell
      handler(stream, idents, msg)
    File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 399, in execute_request
      user_expressions, allow_stdin)
    File "/usr/local/lib/python3.7/dist-packages/ipykernel/ipkernel.py", line 208, in do_execute
      res = shell.run_cell(code, store_history=store_history, silent=silent)
    File "/usr/local/lib/python3.7/dist-packages/ipykernel/zmqshell.py", line 537, in run_cell
      return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
    File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 2718, in run_cell
      interactivity=interactivity, compiler=compiler, result=result)
    File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 2822, in run_ast_nodes
      if self.run_code(code, result):
    File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 2882, in run_code
      exec(code_obj, self.user_global_ns, self.user_ns)
    File "<ipython-input-13-292bf57ef59f>", line 16, in <module>
      callbacks=[early_stopping])
    File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 64, in error_handler
      return fn(*args, **kwargs)
    File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1384, in fit
      tmp_logs = self.train_function(iterator)
    File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1021, in train_function
      return step_function(self, iterator)
    File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1010, in step_function
      outputs = model.distribute_strategy.run(run_step, args=(data,))
    File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1000, in run_step
      outputs = model.train_step(data)
    File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 860, in train_step
      loss = self.compute_loss(x, y, y_pred, sample_weight)
    File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 919, in compute_loss
      y, y_pred, sample_weight, regularization_losses=self.losses)
    File "/usr/local/lib/python3.7/dist-packages/keras/engine/compile_utils.py", line 201, in __call__
      loss_value = loss_obj(y_t, y_p, sample_weight=sw)
    File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 141, in __call__
      losses = call_fn(y_true, y_pred)
    File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 245, in call
      return ag_fn(y_true, y_pred, **self._fn_kwargs)
    File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 1790, in categorical_crossentropy
      y_true, y_pred, from_logits=from_logits, axis=axis)
    File "/usr/local/lib/python3.7/dist-packages/keras/backend.py", line 5099, in categorical_crossentropy
      labels=target, logits=output, axis=axis)
Node: 'categorical_crossentropy/softmax_cross_entropy_with_logits'
logits and labels must be broadcastable: logits_size=[32,10] labels_size=[32,128]
     [[{{node categorical_crossentropy/softmax_cross_entropy_with_logits}}]] [Op:__inference_train_function_11227]

I'm running this on Google Colaboratory. Is there a module I should install, or is it purely an error in the code itself?
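
For what it's worth, nothing needs installing: the last line of the traceback says the model produces logits of shape [32, 10] (the Dense(10) head) while flow_from_directory produced one-hot labels with 128 columns, meaning it discovered 128 class subfolders. A hedged sketch of the usual fix, continuing the question's own variables (train_generator from the code above):

import tensorflow as tf
from tensorflow.keras.applications.vgg16 import VGG16

vgg_basemodel = VGG16(include_top=True)
vgg_model = tf.keras.Sequential(vgg_basemodel.layers[:-1])
# num_classes is the number of class subfolders flow_from_directory found
# (128 according to the error), so the head always matches the labels.
vgg_model.add(tf.keras.layers.Dense(train_generator.num_classes, activation='softmax'))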

Issue

I have the following preprocessing for a TensorFlow neural network:

import csv
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import tensorflow as tf
from tensorflow.keras.layers import Input,Dense,LSTM,Flatten,GlobalAveragePooling1D,Embedding,Dropout

!wget --no-check-certificate \
    https://storage.googleapis.com/laurencemoroney-blog.appspot.com/bbc-text.csv \
    -O /tmp/bbc-text.csv



# Stopwords list from https://github.com/Yoast/YoastSEO.js/blob/develop/src/config/stopwords.js
# Convert it to a Python list and paste it here
stopwords = ["a", "about", "above", "after", "again", "against", "all", "am", "an", "and", "any", "are", "as", "at",
             "be", "because", "been", "before", "being", "below", "between", "both", "but", "by", "could", "did", "do",
             "does", "doing", "down", "during", "each", "few", "for", "from", "further", "had", "has", "have", "having",
             "he", "he'd", "he'll", "he's", "her", "here", "here's", "hers", "herself", "him", "himself", "his", "how",
             "how's", "i", "i'd", "i'll", "i'm", "i've", "if", "in", "into", "is", "it", "it's", "its", "itself",
             "let's", "me", "more", "most", "my", "myself", "nor", "of", "on", "once", "only", "or", "other", "ought",
             "our", "ours", "ourselves", "out", "over", "own", "same", "she", "she'd", "she'll", "she's", "should",
             "so", "some", "such", "than", "that", "that's", "the", "their", "theirs", "them", "themselves", "then",
             "there", "there's", "these", "they", "they'd", "they'll", "they're", "they've", "this", "those", "through",
             "to", "too", "under", "until", "up", "very", "was", "we", "we'd", "we'll", "we're", "we've", "were",
             "what", "what's", "when", "when's", "where", "where's", "which", "while", "who", "who's", "whom", "why",
             "why's", "with", "would", "you", "you'd", "you'll", "you're", "you've", "your", "yours", "yourself",
             "yourselves"]

#----------------------------------- Read from CSV and remove the stopwords
sentences = []
labels = []
with open("/tmp/bbc-text.csv", 'r') as csvfile:
    reader = csv.reader(csvfile, delimiter=',')
    next(reader)
    for row in reader:
        labels.append(row[0])
        sentence = row[1]
        for word in stopwords:
            token = " " + word + " "
            sentence = sentence.replace(token, " ")
            sentence = sentence.replace("  ", " ")
        sentences.append(sentence)


#----------------------------------  Tokenize sentences
tokenizer = Tokenizer(oov_token="<OOV>")
tokenizer.fit_on_texts(sentences)
sequences = tokenizer.texts_to_sequences(sentences)
padded = pad_sequences(sequences, padding = 'post')

#--------------------------------- Tokenize labels
label_tokenizer = Tokenizer()
label_tokenizer.fit_on_texts(labels)
# label_word_index = label_tokenizer.word_index
label_seq = label_tokenizer.texts_to_sequences(labels)

and finally, here is the neural network that consumes the prepared data:

train_sentence = tf.convert_to_tensor(padded,tf.int32)
train_label = tf.convert_to_tensor(label_seq,tf.int32)

input = Input(shape=(2441,))
x = Embedding(input_dim=10000,output_dim=128)(input)
x = LSTM(64,return_sequences=True)(x)
x = LSTM(64,return_sequences=True)(x)
x = LSTM(64,return_sequences=True)(x)
x = Dropout(0.2)(x)
x = LSTM(64)(x)
x = Flatten()(x)
output = Dense(5, activation='softmax')(x)
model = tf.keras.models.Model(input,output)

model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

model.fit(x=train_sentence,y=train_label,epochs=10)

But it fails with the following error:

InvalidArgumentError: Graph execution error:

Solution

The input_dim of the Embedding layer has to correspond to the size of your data’s vocabulary + 1. Also, your labels should begin from zero and not from one when using the sparse_categorical_crossentropy loss function. Here is a working example based on your code and data:

# ...
# ...
train_sentence = tf.convert_to_tensor(padded,tf.int32)
train_label = tf.convert_to_tensor(label_seq,tf.int32)
train_label = train_label - 1

input = Input(shape=(2441,))
x = Embedding(input_dim=len(tokenizer.word_index) + 1,output_dim=128)(input)
x = LSTM(64,return_sequences=True)(x)
x = LSTM(64,return_sequences=True)(x)
x = LSTM(64,return_sequences=True)(x)
x = Dropout(0.2)(x)
x = LSTM(64)(x)
x = Flatten()(x)
output = Dense(5, activation='softmax')(x)
model = tf.keras.models.Model(input,output)

model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

model.fit(x=train_sentence,y=train_label,epochs=10)

Answered By — AloneTogether
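
A quick way to sanity-check both conditions before calling fit (this assumes the variables defined in the solution above):

# Embedding indices must be strictly less than input_dim, and sparse
# labels must lie in [0, number_of_output_units).
vocab_size = len(tokenizer.word_index) + 1  # +1 because index 0 is reserved for padding
assert int(tf.reduce_max(train_sentence)) < vocab_size
assert int(tf.reduce_min(train_label)) >= 0
assert int(tf.reduce_max(train_label)) < 5  # 5 = units in the final Dense layer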


Tensorflow.errors.InvalidArgumentError — python examples

In eager mode, arguments are validated as soon as `tf.debugging.assert_*` is called. In graph mode, the call to `tf.debugging.assert_*` only creates an Op, and the actual validation happens when the graph is run. The behavior in graph mode may change in the future to validate statically known arguments (e.g. `tf.constant`) at Op-creation time.
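
A minimal, self-contained illustration of that difference (assuming TF 2.x, where "graph mode" means running inside a tf.function):

import tensorflow as tf

# Eager mode: the check fires as soon as the op is called.
try:
    tf.debugging.assert_non_negative(tf.constant([-1.0]))
except tf.errors.InvalidArgumentError:
    print("eager: raised immediately")

# Graph mode: the assert only becomes an Op in the traced graph and
# fires when the graph actually executes.
@tf.function
def check(x):
    tf.debugging.assert_non_negative(x)
    return x

try:
    check(tf.constant([-1.0]))
except tf.errors.InvalidArgumentError:
    print("graph: raised when the graph ran")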

  def testApplyFeatureMaskWithInvalidMaskNegative(self):
    """Test the apply_feature_mask function with mask value < 0."""
    features = [[1.0, 1.0], [2.0, 2.0]]
    mask = [-1.0, 1.0]
    # In eager mode, the arguments are validated once `tf.debugging.assert_*` is
    # called (in `utils.apply_feature_mask`). In graph mode, the call to
    # `tf.debugging.assert_*` only creates an Op, and the actual validation
    # happens when the graph is run. The behavior in graph mode may change in
    # the future to validate statically known arguments (e.g. `tf.constant`) at
    # Op-creation time. Enclosing both Op creation and evaluation in
    # an `assertRaises` block handles all cases.
    with self.assertRaises(tf.errors.InvalidArgumentError):
      masked_features = utils.apply_feature_mask(
          tf.constant(features), tf.constant(mask))
      self.evaluate(masked_features)
  @parameterized.parameters((vqvae.VectorQuantizer, {
      'embedding_dim': 4,
      'num_embeddings': 8,
      'commitment_cost': 0.25
  }), (vqvae.VectorQuantizerEMA, {
      'embedding_dim': 6,
      'num_embeddings': 13,
      'commitment_cost': 0.5,
      'decay': 0.1
  }))
  def testShapeChecking(self, constructor, kwargs):
    vqvae_module = constructor(**kwargs)
    wrong_shape_input = np.random.randn(100, kwargs['embedding_dim'] * 2)
    with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                 'but the requested shape has'):
      vqvae_module(
          tf.constant(wrong_shape_input.astype(np.float32)), is_training=False)
  def testTaggerParserNanDeath(self):
    hyperparam_config = spec_pb2.GridPoint()
    hyperparam_config.learning_rate = 1.0

    # The large learning rate should trigger check_numerics.
    with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                 'Cost is not finite'):
      self.RunFullTrainingAndInference(
          'tagger-parser',
          'tagger_parser_master_spec.textproto',
          hyperparam_config=hyperparam_config,
          component_weights=[0., 1., 1.],
          unroll_using_oracle=[False, True, True],
          expected_num_actions=12,
          expected=_TAGGER_PARSER_EXPECTED_SENTENCES)
  def testStructuredTrainingNotImplementedDeath(self):
    spec = self.LoadSpec('simple_parser_master_spec.textproto')

    # Make the 'parser' component have a beam at training time.
    self.assertEqual('parser', spec.component[0].name)
    spec.component[0].training_beam_size = 8

    # The training run should fail at runtime rather than build time.
    with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                 r'[Not implemented.]'):
      self.RunFullTrainingAndInference(
          'simple-parser',
          master_spec=spec,
          expected_num_actions=8,
          component_weights=[1],
          expected=_LABELED_PARSER_EXPECTED_SENTENCES)
  def testScalarHandling(self):
    with self.test_session(use_gpu=False) as sess:
      with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                   ".*labels must be 1-D.*"):
        labels = tf.placeholder(tf.int32, shape=[None, 1])
        logits = tf.placeholder(tf.float32, shape=[None, 3])
        ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits,
            tf.squeeze(labels))
        labels_v2 = np.zeros((1, 1), dtype=np.int32)
        logits_v2 = np.random.randn(1, 3)
        sess.run([ce], feed_dict={labels: labels_v2,
                                  logits: logits_v2})
    def test_invalid_time_string(self):
        with self.assertRaises(tf.errors.InvalidArgumentError):
            self.evaluate(
                text.parse_time(
                    time_string="INVALID",
                    time_format="%Y-%m-%dT%H:%M:%E*S%Ez",
                    output_unit="SECOND"))
  def testApplyFeatureMaskWithInvalidMaskTooLarge(self):
    """Test the apply_feature_mask function with mask value > 1."""
    features = [[1.0, 1.0], [2.0, 2.0]]
    mask = [1.0, 2.0]
    # In eager mode, the arguments are validated once `tf.debugging.assert_*` is
    # called (in `utils.apply_feature_mask`). In graph mode, the call to
    # `tf.debugging.assert_*` only creates an Op, and the actual validation
    # happens when the graph is run. The behavior in graph mode may change in
    # the future to validate statically known arguments (e.g. `tf.constant`) at
    # Op-creation time. Enclosing both Op creation and evaluation in
    # an `assertRaises` block handles all cases.
    with self.assertRaises(tf.errors.InvalidArgumentError):
      masked_features = utils.apply_feature_mask(
          tf.constant(features), tf.constant(mask))
      self.evaluate(masked_features)
  @parameterized.parameters(quadratic_radial_distortion.distortion_factor,
                            quadratic_radial_distortion.undistortion_factor)
  def test_both_negative_radius_exception_raised(self, distortion_function):
    """Tests that an exception is raised when the squared radius is negative."""
    squared_radii = _get_zeros_radii() - 0.5
    distortion_coefficient = _get_random_coefficient() - 0.5

    with self.assertRaises(tf.errors.InvalidArgumentError):
      self.evaluate(distortion_function(squared_radii, distortion_coefficient))
  @test_case.parameters(
      ([[1], [2]], [[1], [2], [3]], None, None, tf.errors.InvalidArgumentError,
       'Condition x == y did not hold element-wise:'),
      ([[1], [2], [3]], [[1], [2], [3]], [None, None], [None], ValueError,
       r'Shapes (None, None) and (None,) are incompatible'),
  )
  def test_same_shape_exceptions(self, x_input, y_input, x_shape, y_shape,
                                 exception_cls, error_string):

    with tf.compat.v1.Graph().as_default():
      x = tf.compat.v1.placeholder(tf.int32, x_shape)
      y = tf.compat.v1.placeholder(tf.int32, y_shape)
      with tf.compat.v1.Session() as sess:
        with self.assertRaisesRegexp(exception_cls, error_string):
          sess.run(tf_utils.assert_same_shape(x, y), {x: x_input, y: y_input})
    def testRightPaddedSequenceAssertion(self):
        right_padded_sequence = [[True, True, False, False],
                                 [True, True, True, False]]
        left_padded_sequence = [[False, False, True, True],
                                [False, True, True, True]]

        assertion = sampler_py._check_sequence_is_right_padded(
            right_padded_sequence, False)
        self.evaluate(assertion)

        with self.assertRaises(tf.errors.InvalidArgumentError):
            assertion = sampler_py._check_sequence_is_right_padded(
                left_padded_sequence, False)
            self.evaluate(assertion)
    def test_from_4D_image_with_invalid_data(self):
        with self.assertRaises((ValueError, tf.errors.InvalidArgumentError)):
            self.evaluate(
                img_utils.from_4D_image(tf.ones(shape=(2, 2, 4, 1)), 2))

        with self.assertRaises((ValueError, tf.errors.InvalidArgumentError)):
            self.evaluate(
                img_utils.from_4D_image(
                    tf.ones(shape=(2, 2, 4, 1)), tf.constant(2)))
  def testTaggerParserWithAttentionBatchDeath(self):
    spec = self.LoadSpec('tagger_parser_master_spec.textproto')

    # Make the 'parser' component attend to the 'tagger' component.
    self.assertEqual('tagger', spec.component[1].name)
    self.assertEqual('parser', spec.component[2].name)
    spec.component[2].attention_component = 'tagger'

    # Trying to run with a batch size greater than 1 should fail:
    with self.assertRaises(tf.errors.InvalidArgumentError):
      self.RunFullTrainingAndInference(
          'tagger-parser',
          master_spec=spec,
          component_weights=[0., 1., 1.],
          unroll_using_oracle=[False, True, True],
          expected_num_actions=9,
          expected=_TAGGER_PARSER_EXPECTED_SENTENCES)
  def test_evaluate_legendre_polynomial_exceptions_m_raised(self):
    """Tests that an exception is raised when m is not in the expected range."""
    l = np.random.randint(1, 4, size=(1,))
    m = np.random.randint(5, 10, size=(1,))
    x = np.random.uniform(size=(1,)) * 2.0

    with self.assertRaises(tf.errors.InvalidArgumentError):
      self.evaluate(spherical_harmonics.evaluate_legendre_polynomial(l, m, x))
  def testInvalidNegativeMultinomialDistribution(self):
    source_tensor = np.array([[1, 0, 0], [-0.1, 0.3, 0.8]])
    target_tensor = np.array([[1, 0, 0], [0.1, 0.9, 0]])
    with self.cached_session():
      with self.assertRaises(tf.errors.InvalidArgumentError):
        distance_tensor = distances.kl_divergence(
            source_tensor, target_tensor, axis=-1)
        distance_tensor.eval()
      with self.assertRaises(tf.errors.InvalidArgumentError):
        distance_tensor = distances.jensen_shannon_divergence(
            source_tensor, target_tensor, axis=-1)
        distance_tensor.eval()
    def test_size_exception(self):
        """Make sure it throws an exception for images that are too small."""
        shape = [1, 2, 1, 1]
        errors = (ValueError, tf.errors.InvalidArgumentError)
        with self.assertRaisesRegexp(errors, "Grid width must be at least 2."):
            self._check_interpolation_correctness(shape, "float32", "float32")
    def test_bad_int_shape(self):
        new_shape = 0
        n_splits = 2
        with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, ""):
            self.run_test(new_shape, n_splits, (new_shape,) * 2, False)

        with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, ""):
            self.run_test(new_shape, n_splits, (new_shape,) * 3, True)

        new_shape = 2
        n_splits = 0
        with self.assertRaisesRegexp(AssertionError, ""):
            self.run_test(new_shape, n_splits, (new_shape,) * 2, False)

        with self.assertRaisesRegexp(AssertionError, ""):
            self.run_test(new_shape, n_splits, (new_shape,) * 3, True)
  def test_evaluate_spherical_harmonics_exception_l_raised(self):
    """Tests that an exception is raised when l is not in the expected range."""
    l = np.random.randint(-10, -1, size=(1,))
    m = np.random.randint(5, 10, size=(1,))
    theta = np.random.uniform(0.0, np.pi, size=(1,))
    phi = np.random.uniform(0.0, 2.0 * np.pi, size=(1,))

    with self.assertRaises(tf.errors.InvalidArgumentError):
      self.evaluate(
          spherical_harmonics.evaluate_spherical_harmonics(l, m, theta, phi))
  def testInvalid(self):
    x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
    with self.test_session():
      with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                   "is out of valid range"):
        array_ops.reverse_v2(x_np, [-30]).eval()
      with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                   "is out of valid range"):
        array_ops.reverse_v2(x_np, [2]).eval()
      with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                   "axis 0 specified more than once"):
        array_ops.reverse_v2(x_np, [0, -2]).eval()
  @parameterized.parameters(
      (((-1.0, 1.0), (1.0, 1.0), (3.0, 1.0), (-1.0, -1.0), (1.0, -1.0),
        (3.0, -1.0)), ((1.0, -1.0, 1.0, -1.0), (0.0, 0.0, 0.0, 0.0)),
       (((0,), (1,), (3,), (4,)), ((1,), (2,), (4,), (5,))), ((0.0, 0.0),
                                                              (0.0, 0.0))))
  def test_interpolate_unnormalizable_raised_(self, points, weights, indices,
                                              out):
    """Tests whether exception is raised when weights are unnormalizable."""
    with self.assertRaises(tf.errors.InvalidArgumentError):
      result = weighted.interpolate(
          points=points,
          weights=weights,
          indices=indices,
          normalize=True,
          allow_negative_weights=True)
      self.evaluate(result)
    def test_invalid_image(self):
        msg = "`image` must be 2/3/4D tensor"
        errors = (ValueError, tf.errors.InvalidArgumentError)
        for image_shape in [(1,), (16, 28, 28, 1, 1)]:
            with self.subTest(dim=len(image_shape)):
                with self.assertRaisesRegexp(errors, msg):
                    image = tf.ones(shape=image_shape)
                    self.evaluate(mean_filter2d(image))
    def test_invalid_time_format(self):
        with self.assertRaises(tf.errors.InvalidArgumentError):
            self.evaluate(
                text.parse_time(
                    time_string="2019-05-17T23:56:09.05Z",
                    time_format="INVALID",
                    output_unit="SECOND"))
  def test_evaluate_spherical_harmonics_exception_theta_raised(self):
    """Tests exceptions on the values of theta."""
    l = np.random.randint(1, 4, size=(1,))
    m = np.random.randint(5, 10, size=(1,))
    theta = np.random.uniform(
        np.pi + sys.float_info.epsilon, 2.0 * np.pi, size=(1,))
    phi = np.random.uniform(0.0, 2.0 * np.pi, size=(1,))

    with self.assertRaises(tf.errors.InvalidArgumentError):
      self.evaluate(
          spherical_harmonics.evaluate_spherical_harmonics(l, m, theta, phi))

    theta = np.random.uniform(-np.pi, 0.0 - sys.float_info.epsilon, size=(1,))

    with self.assertRaises(tf.errors.InvalidArgumentError):
      self.evaluate(
          spherical_harmonics.evaluate_spherical_harmonics(l, m, theta, phi))
    def test_to_4D_image_with_invalid_shape(self):
        errors = (ValueError, tf.errors.InvalidArgumentError)
        with self.assertRaisesRegexp(errors, '`image` must be 2/3/4D tensor'):
            img_utils.to_4D_image(tf.ones(shape=(1,)))

        with self.assertRaisesRegexp(errors, '`image` must be 2/3/4D tensor'):
            img_utils.to_4D_image(tf.ones(shape=(1, 2, 4, 3, 2)))
  @parameterized.parameters((np.float32), (np.float64))
  def test_assert_rotation_matrix_normalized_preset(self, dtype):
    """Checks that assert_normalized function works as expected."""
    angles = test_helpers.generate_preset_test_euler_angles().astype(dtype)

    matrix = rotation_matrix_3d.from_euler(angles)
    matrix_rescaled = matrix * 1.01
    matrix_normalized = rotation_matrix_3d.assert_rotation_matrix_normalized(
        matrix)
    self.evaluate(matrix_normalized)

    with self.assertRaises(tf.errors.InvalidArgumentError):
      rescaled_normalized = rotation_matrix_3d.assert_rotation_matrix_normalized(
          matrix_rescaled)
      self.evaluate(rescaled_normalized)
  def testInvalidMultinomialDistribution(self):
    source_tensor = np.array([[1, 0, 0], [0.1, 0.2, 0.8]])
    target_tensor = np.array([[1, 0, 0], [0.1, 0.9, 0]])
    with self.cached_session():
      with self.assertRaises(tf.errors.InvalidArgumentError):
        distance_tensor = distances.kl_divergence(
            source_tensor, target_tensor, axis=-1)
        distance_tensor.eval()
      with self.assertRaises(tf.errors.InvalidArgumentError):
        distance_tensor = distances.jensen_shannon_divergence(
            source_tensor, target_tensor, axis=-1)
        distance_tensor.eval()
    def test_invalid_output_unit(self):
        errors = (ValueError, tf.errors.InvalidArgumentError)
        with self.assertRaises(errors):
            text.parse_time(
                time_string="2019-05-17T23:56:09.05Z",
                time_format="%Y-%m-%dT%H:%M:%E*S%Ez",
                output_unit="INVALID")
  def testAssertions(self):
    """Tests that assertions still work with Keras."""
    distance_config = configs.DistanceConfig(
        distance_type=configs.DistanceType.JENSEN_SHANNON_DIVERGENCE,
        sum_over_axis=-1)
    regularizer = pairwise_distance_lib.PairwiseDistance(distance_config)
    # Try Jensen-Shannon divergence on an improper probability distribution.
    with self.assertRaisesRegex(
        tf.errors.InvalidArgumentError,
        'x and/or y is not a proper probability distribution'):
      self.evaluate(regularizer(np.array([0.6, 0.5]), np.array([[0.25, 0.75]])))
  def test_evaluate_legendre_polynomial_exceptions_l_raised(self):
    """Tests that an exception is raised when l is not in the expected range."""
    l = np.random.randint(-10, -1, size=(1,))
    m = np.random.randint(5, 10, size=(1,))
    x = np.random.uniform(size=(1,)) * 2.0

    with self.assertRaises(tf.errors.InvalidArgumentError):
      self.evaluate(spherical_harmonics.evaluate_legendre_polynomial(l, m, x))
  def testBadTarget(self):
    predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
    target = [0, 80000]
    with self.test_session():
      with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                   "target.*out of range"):
        tf.nn.in_top_k(predictions, target, 2).eval()
  @parameterized.parameters(
      (3, 4, 2, 3),
      (5, 4, 5, 3),
      (5, 6, 5, 5),
      (2, 6, 5, 1),
  )
  def test_interpolate_negative_weights_raised(self, dim_points, num_points,
                                               num_outputs,
                                               num_pts_to_interpolate):
    """Tests whether exception is raised when weights are negative."""
    points, weights, indices = self._get_tensors_from_shapes(
        num_points, dim_points, num_outputs, num_pts_to_interpolate)
    weights *= -1.0

    with self.assertRaises(tf.errors.InvalidArgumentError):
      result = weighted.interpolate(
          points=points, weights=weights, indices=indices, normalize=True)
      self.evaluate(result)
    def test_from_4D_image_with_invalid_shape(self):
        errors = (ValueError, tf.errors.InvalidArgumentError)
        for rank in 2, tf.constant(2):
            with self.subTest(rank=rank):
                with self.assertRaisesRegexp(errors,
                                             '`image` must be 4D tensor'):
                    img_utils.from_4D_image(tf.ones(shape=(2, 4)), rank)

                with self.assertRaisesRegexp(errors,
                                             '`image` must be 4D tensor'):
                    img_utils.from_4D_image(tf.ones(shape=(2, 4, 1)), rank)

                with self.assertRaisesRegexp(errors,
                                             '`image` must be 4D tensor'):
                    img_utils.from_4D_image(
                        tf.ones(shape=(1, 2, 4, 1, 1)), rank)
  def test_evaluate_spherical_harmonics_exception_phi_raised(self):
    """Tests exceptions on the values of phi."""
    l = np.random.randint(1, 4, size=(1,))
    m = np.random.randint(5, 10, size=(1,))
    theta = np.random.uniform(0.0, np.pi, size=(1,))
    phi = np.random.uniform(
        2.0 * np.pi + sys.float_info.epsilon, 4.0 * np.pi, size=(1,))

    with self.assertRaises(tf.errors.InvalidArgumentError):
      self.evaluate(
          spherical_harmonics.evaluate_spherical_harmonics(l, m, theta, phi))

    phi = np.random.uniform(
        -2.0 * np.pi, 0.0 - sys.float_info.epsilon, size=(1,))

    with self.assertRaises(tf.errors.InvalidArgumentError):
      self.evaluate(
          spherical_harmonics.evaluate_spherical_harmonics(l, m, theta, phi))
  @parameterized.parameters(
      ((0., 0., 0.), (0., 0., 0.), (0., 0., 0.)),
      ((1., 0., 0.), (0., 0., 0.), (0., 0., 0.)),
      ((0., 0., 0.), (0., 1., 0.), (0., 0., 0.)),
      ((0., 0., 0.), (0., 0., 0.), (0., 0., 1.)),
  )
  def test_normal_assert(self, v0, v1, v2):
    """Tests the triangle normal assertion."""
    with self.assertRaises(tf.errors.InvalidArgumentError):
      self.evaluate(triangle.normal(v0, v1, v2))
  def test_evaluate_spherical_harmonics_exception_m_raised(self):
    """Tests that an exception is raised when m is not in the expected range."""
    l = np.random.randint(1, 4, size=(1,))
    m = np.random.randint(5, 10, size=(1,))
    theta = np.random.uniform(0.0, np.pi, size=(1,))
    phi = np.random.uniform(0.0, 2.0 * np.pi, size=(1,))

    with self.assertRaises(tf.errors.InvalidArgumentError):
      self.evaluate(
          spherical_harmonics.evaluate_spherical_harmonics(l, m, theta, phi))

    m = np.random.randint(-10, -5, size=(1,))

    with self.assertRaises(tf.errors.InvalidArgumentError):
      self.evaluate(
          spherical_harmonics.evaluate_spherical_harmonics(l, m, theta, phi))
    def test_invalid_image(self):
        msg = "`image` must be 2/3/4D tensor"
        errors = (ValueError, tf.errors.InvalidArgumentError)
        for image_shape in [(1,), (16, 28, 28, 1, 1)]:
            with self.subTest(dim=len(image_shape)):
                with self.assertRaisesRegexp(errors, msg):
                    image = tf.ones(shape=image_shape)
                    self.evaluate(median_filter2d(image))

Tensorflow: InvalidArgumentError: Graph execution error:

1 Answer. The input_dim of the Embedding layer has to correspond to the size of your data’s vocabulary + 1. Also, your labels should begin from zero and not from one when using sparse_categorical_crossentropy. Here is a working example:

import csv
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import tensorflow as tf
from tensorflow.keras.layers import Input,Dense,LSTM,Flatten,GlobalAveragePooling1D,Embedding,Dropout

!wget --no-check-certificate 
    https://storage.googleapis.com/laurencemoroney-blog.appspot.com/bbc-text.csv 
    -O /tmp/bbc-text.csv



# Stopwords list from https://github.com/Yoast/YoastSEO.js/blob/develop/src/config/stopwords.js
# Convert it to a Python list and paste it here
stopwords = ["a", "about", "above", "after", "again", "against", "all", "am", "an", "and", "any", "are", "as", "at",
             "be", "because", "been", "before", "being", "below", "between", "both", "but", "by", "could", "did", "do",
             "does", "doing", "down", "during", "each", "few", "for", "from", "further", "had", "has", "have", "having",
             "he", "he'd", "he'll", "he's", "her", "here", "here's", "hers", "herself", "him", "himself", "his", "how",
             "how's", "i", "i'd", "i'll", "i'm", "i've", "if", "in", "into", "is", "it", "it's", "its", "itself",
             "let's", "me", "more", "most", "my", "myself", "nor", "of", "on", "once", "only", "or", "other", "ought",
             "our", "ours", "ourselves", "out", "over", "own", "same", "she", "she'd", "she'll", "she's", "should",
             "so", "some", "such", "than", "that", "that's", "the", "their", "theirs", "them", "themselves", "then",
             "there", "there's", "these", "they", "they'd", "they'll", "they're", "they've", "this", "those", "through",
             "to", "too", "under", "until", "up", "very", "was", "we", "we'd", "we'll", "we're", "we've", "were",
             "what", "what's", "when", "when's", "where", "where's", "which", "while", "who", "who's", "whom", "why",
             "why's", "with", "would", "you", "you'd", "you'll", "you're", "you've", "your", "yours", "yourself",
             "yourselves"]

#----------------------------------- Ream from Csv and remove the stopwords
sentences = []
labels = []
with open("/tmp/bbc-text.csv", 'r') as csvfile:
    reader = csv.reader(csvfile, delimiter=',')
    next(reader)
    for row in reader:
        labels.append(row[0])
        sentence = row[1]
        for word in stopwords:
            token = " " + word + " "
            sentence = sentence.replace(token, " ")
            sentence = sentence.replace(" ", " ")
        sentences.append(sentence)


#----------------------------------  Tokenize sentences
tokenizer = Tokenizer(oov_token="<OOV>")
tokenizer.fit_on_texts(sentences)
sequences = tokenizer.texts_to_sequences(sentences)
padded = pad_sequences(sequences, padding = 'post')

#--------------------------------- Tokenize labels
label_tokenizer = Tokenizer()
label_tokenizer.fit_on_texts(labels)
# label_word_index = label_tokenizer.word_index
label_seq = label_tokenizer.texts_to_sequences(labels)`
train_sentence = tf.convert_to_tensor(padded,tf.int32)
train_label = tf.convert_to_tensor(label_seq,tf.int32)

input = Input(shape=(2441,))
x = Embedding(input_dim=10000,output_dim=128)(input)
x = LSTM(64,return_sequences=True)(x)
x = LSTM(64,return_sequences=True)(x)
x = LSTM(64,return_sequences=True)(x)
x = Dropout(0.2)(x)
x = LSTM(64)(x)
x = Flatten()(x)
output = Dense(5, activation='softmax')(x)
model = tf.keras.models.Model(input,output)

model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

model.fit(x=train_sentence,y=train_label,epochs=10)
InvalidArgumentError: Graph execution error:
# ...
# ...
train_sentence = tf.convert_to_tensor(padded,tf.int32)
train_label = tf.convert_to_tensor(label_seq,tf.int32)
train_label = train_label - 1

input = Input(shape=(2441,))
x = Embedding(input_dim=len(tokenizer.word_index) + 1,output_dim=128)(input)
x = LSTM(64,return_sequences=True)(x)
x = LSTM(64,return_sequences=True)(x)
x = LSTM(64,return_sequences=True)(x)
x = Dropout(0.2)(x)
x = LSTM(64)(x)
x = Flatten()(x)
output = Dense(5, activation='softmax')(x)
model = tf.keras.models.Model(input,output)

model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

model.fit(x=train_sentence,y=train_label,epochs=10)
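
A quick sanity check, as a hedged aside, confirms the label shift before training:

print(int(tf.reduce_min(train_label)), int(tf.reduce_max(train_label)))  # expect 0 and 4 after the shift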

InvalidArgumentError: Graph execution error: CNN model

I’m having multiple errors while running this VGG training code (code and errors shown below). I don’t know if it’s because of my dataset or something else.

    import matplotlib.pyplot as plt
    import numpy as np
    import pandas as pd
    import seaborn as sns
    import os
    import tensorflow as tf
    import keras
    import glob as gb
    
    from keras.preprocessing.image import load_img, img_to_array
    from keras.preprocessing.image import ImageDataGenerator
    from keras.layers import Dense,Input,Dropout,GlobalAveragePooling2D,Flatten,Conv2D,BatchNormalization,Activation,MaxPooling2D
    from keras.models import Model,Sequential
    from tensorflow.keras.optimizers import Adam , SGD, RMSprop

!unzip "gdrive/My Drive/data/emotioncollab2.zip" > /dev/null

TRAIN_DIR="/content/eINTERFACE_2021_Image/train"
TEST_DIR="/content/eINTERFACE_2021_Image/test"
BATCH_SIZE= 64
for folder in os.listdir(TRAIN_DIR):
  files=gb.glob(pathname=str(TRAIN_DIR+"/"+folder+'/*.jpg'))
  print(f'for training data, found {len(files)} in folder {folder}')

for folder in os.listdir(TEST_DIR):
  files=gb.glob(pathname=str(TEST_DIR+"/"+folder+'/*.jpg'))
  print(f'for testing data, found {len(files)} in folder {folder}')

import random
import matplotlib.image as mpimg

def view_random_images(target_dir,target_class):
  target_folder = target_dir+target_class

  random_image=random.sample(os.listdir(target_folder),1)

  img=mpimg.imread(target_folder+'/'+random_image[0])
  plt.imshow(img)
  plt.title(target_class)
  plt.axis('off');
  print(f"Image shape{img.shape}")

  return img
class_names = ['Anger','Disgust','Fear','Happiness','Sadness','Surprise']
plt.figure(figsize=(20,10))
for i in range(18):
  plt.subplot(3,6,i+1)
  class_name=random.choice(class_names)
  img=view_random_images(target_dir="/content/eINTERFACE_2021_Image/train/",target_class=class_name)

from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
test_datagen=ImageDataGenerator(rescale=1./255)

training_set=train_datagen.flow_from_directory(TRAIN_DIR,
                                               target_size=(256,256),
                                               batch_size=BATCH_SIZE,
                                               class_mode='categorical')

test_set=test_datagen.flow_from_directory(TEST_DIR,
                                          target_size=(256,256),
                                          batch_size=BATCH_SIZE,
                                          class_mode='categorical'
                                          )

classifier=Sequential()

classifier.add(Conv2D(16,(3,3),input_shape=(128,1288,3),activation='relu'))

classifier.add(MaxPooling2D(pool_size=(2,2)))
classifier.add(BatchNormalization(axis=-1))

classifier.add(Conv2D(32,(3,3),activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2,2)))
classifier.add(BatchNormalization(axis=-1))

classifier.add(Flatten())

classifier.add(Dense(units=128,activation='relu'))
classifier.add(BatchNormalization())
classifier.add(Dropout(rate=0.5))
classifier.add(Dense(6,activation='softmax'))

opt= tf.keras.optimizers.Adam(learning_rate=0.001 , decay=0.001/(50*0.5))


classifier.compile(optimizer=opt,loss='sparse_categorical_crossentropy',metrics=['accuracy'])

history=classifier.fit(training_set,epochs=50,validation_data=test_set,verbose=1)
classifier.save('model.h5')
        
        
        Epoch 1/50
        ---------------------------------------------------------------------------
        InvalidArgumentError                      Traceback (most recent call last)
        <ipython-input-69-16dc3b00a1b3> in <module>()
        ----> 1 history=classifier.fit(training_set,epochs=50,validation_data=test_set,verbose=1)
              2 classifier.save('model.h5')
        
        1 frames
        /usr/local/lib/python3.7/dist-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
             53     ctx.ensure_initialized()
             54     tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
        ---> 55                                         inputs, attrs, num_outputs)
             56   except core._NotOkStatusException as e:
             57     if name is not None:
        
        InvalidArgumentError: Graph execution error:
        
        Detected at node 'sequential_12/flatten_12/Reshape' defined at (most recent call last):
            File "/usr/lib/python3.7/runpy.py", line 193, in _run_module_as_main
              "__main__", mod_spec)
            File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
              exec(code, run_globals)
            File "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py", line 16, in <module>
              app.launch_new_instance()
            File "/usr/local/lib/python3.7/dist-packages/traitlets/config/application.py", line 846, in launch_instance
              app.start()
            File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelapp.py", line 499, in start
              self.io_loop.start()
            File "/usr/local/lib/python3.7/dist-packages/tornado/platform/asyncio.py", line 132, in start
              self.asyncio_loop.run_forever()
            File "/usr/lib/python3.7/asyncio/base_events.py", line 541, in run_forever
              self._run_once()
            File "/usr/lib/python3.7/asyncio/base_events.py", line 1786, in _run_once
              handle._run()
            File "/usr/lib/python3.7/asyncio/events.py", line 88, in _run
              self._context.run(self._callback, *self._args)
            File "/usr/local/lib/python3.7/dist-packages/tornado/platform/asyncio.py", line 122, in _handle_events
              handler_func(fileobj, events)
            File "/usr/local/lib/python3.7/dist-packages/tornado/stack_context.py", line 300, in null_wrapper
              return fn(*args, **kwargs)
            File "/usr/local/lib/python3.7/dist-packages/zmq/eventloop/zmqstream.py", line 577, in _handle_events
              self._handle_recv()
            File "/usr/local/lib/python3.7/dist-packages/zmq/eventloop/zmqstream.py", line 606, in _handle_recv
              self._run_callback(callback, msg)
            File "/usr/local/lib/python3.7/dist-packages/zmq/eventloop/zmqstream.py", line 556, in _run_callback
              callback(*args, **kwargs)
            File "/usr/local/lib/python3.7/dist-packages/tornado/stack_context.py", line 300, in null_wrapper
              return fn(*args, **kwargs)
            File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 283, in dispatcher
              return self.dispatch_shell(stream, msg)
            File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 233, in dispatch_shell
              handler(stream, idents, msg)
            File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 399, in execute_request
              user_expressions, allow_stdin)
            File "/usr/local/lib/python3.7/dist-packages/ipykernel/ipkernel.py", line 208, in do_execute
              res = shell.run_cell(code, store_history=store_history, silent=silent)
            File "/usr/local/lib/python3.7/dist-packages/ipykernel/zmqshell.py", line 537, in run_cell
              return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
            File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 2718, in run_cell
              interactivity=interactivity, compiler=compiler, result=result)
            File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 2822, in run_ast_nodes
              if self.run_code(code, result):
            File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 2882, in run_code
              exec(code_obj, self.user_global_ns, self.user_ns)
            File "<ipython-input-69-16dc3b00a1b3>", line 1, in <module>
              history=classifier.fit(training_set,epochs=50,validation_data=test_set,verbose=1)
            File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 64, in error_handler
              return fn(*args, **kwargs)
            File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1384, in fit
              tmp_logs = self.train_function(iterator)
            File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1021, in train_function
              return step_function(self, iterator)
            File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1010, in step_function
              outputs = model.distribute_strategy.run(run_step, args=(data,))
            File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1000, in run_step
              outputs = model.train_step(data)
            File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 859, in train_step
              y_pred = self(x, training=True)
            File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 64, in error_handler
              return fn(*args, **kwargs)
            File "/usr/local/lib/python3.7/dist-packages/keras/engine/base_layer.py", line 1096, in __call__
              outputs = call_fn(inputs, *args, **kwargs)
            File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 92, in error_handler
              return fn(*args, **kwargs)
            File "/usr/local/lib/python3.7/dist-packages/keras/engine/sequential.py", line 374, in call
              return super(Sequential, self).call(inputs, training=training, mask=mask)
            File "/usr/local/lib/python3.7/dist-packages/keras/engine/functional.py", line 452, in call
              inputs, training=training, mask=mask)
            File "/usr/local/lib/python3.7/dist-packages/keras/engine/functional.py", line 589, in _run_internal_graph
              outputs = node.layer(*args, **kwargs)
            File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 64, in error_handler
              return fn(*args, **kwargs)
            File "/usr/local/lib/python3.7/dist-packages/keras/engine/base_layer.py", line 1096, in __call__
              outputs = call_fn(inputs, *args, **kwargs)
            File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 92, in error_handler
              return fn(*args, **kwargs)
            File "/usr/local/lib/python3.7/dist-packages/keras/layers/core/flatten.py", line 96, in call
              return tf.reshape(inputs, flattened_shape)
        Node: 'sequential_12/flatten_12/Reshape'
        Input to reshape is a tensor with 7872512 values, but the requested shape requires a multiple of 307200
             [[{{node sequential_12/flatten_12/Reshape}}]] [Op:__inference_train_function_10733]
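
The numbers in the last line pin down the cause: the generators feed 256×256 images, so one batch reaching Flatten holds 64 × 62 × 62 × 32 = 7,872,512 values, while the graph was built for input_shape=(128,1288,3), whose flattened size is a multiple of 307,200. The input_shape must match the generators' target_size, and since class_mode='categorical' yields one-hot labels, categorical_crossentropy is the matching loss. A minimal sketch of a corrected model definition, assuming the generators keep target_size=(256,256):

classifier = Sequential()
# input_shape must agree with the generators' target_size plus the 3 colour channels
classifier.add(Conv2D(16, (3, 3), input_shape=(256, 256, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(BatchNormalization(axis=-1))

classifier.add(Conv2D(32, (3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(BatchNormalization(axis=-1))

classifier.add(Flatten())
classifier.add(Dense(units=128, activation='relu'))
classifier.add(BatchNormalization())
classifier.add(Dropout(rate=0.5))
classifier.add(Dense(6, activation='softmax'))

opt = tf.keras.optimizers.Adam(learning_rate=0.001, decay=0.001/(50*0.5))
# class_mode='categorical' produces one-hot labels, so the loss must be categorical_crossentropy
classifier.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])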

UnimplementedError: Graph execution error: running NN on TensorFlow

It is easy: the train_generator approach is old; you can use this instead. It is about memory: you create the matrices in the initial operations, which will reduce …

img_shape = (128,128,3)

# load pretrained model
base_model = tf.keras.applications.VGG19(input_shape=img_shape, include_top=False, weights='imagenet')

# freezing the model
base_model.trainable = False

#define the custom head for network
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()(base_model.output)

# output / prediction layer
prediction_layer = tf.keras.layers.Dense(units=1, activation='sigmoid')(global_average_layer)

model = tf.keras.models.Model(inputs=base_model.input, outputs=prediction_layer)

# compile the model
opt = tf.keras.optimizers.RMSprop(learning_rate=0.0001)
model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])

# create data generators
# import library
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# define objects
data_gen_train = ImageDataGenerator(rescale=1/255.0)
data_gen_test = ImageDataGenerator(rescale=1/255.0)

# define variables
train_generator = data_gen_train.flow_from_directory(directory=training_dir, target_size=(128,128), batch_size=128, class_mode='binary')
test_generator = data_gen_test.flow_from_directory(directory=test_dir, target_size=(128,128), batch_size=128, class_mode='binary')

model.fit_generator(generator=train_generator, epochs=5, validation_data=test_generator)
/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:1: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.
  """Entry point for launching an IPython kernel.
Epoch 1/5
---------------------------------------------------------------------------
UnimplementedError                        Traceback (most recent call last)
<ipython-input-46-18b18ca5977c> in <module>()
----> 1 model.fit_generator(generator=train_generator, epochs=5, validation_data=test_generator)

2 frames
/usr/local/lib/python3.7/dist-packages/keras/engine/training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, validation_freq, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
   2221         use_multiprocessing=use_multiprocessing,
   2222         shuffle=shuffle,
-> 2223         initial_epoch=initial_epoch)
   2224 
   2225   @doc_controls.do_not_generate_docs

/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py in error_handler(*args, **kwargs)
     65     except Exception as e:  # pylint: disable=broad-except
     66       filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67       raise e.with_traceback(filtered_tb) from None
     68     finally:
     69       del filtered_tb
/usr/local/lib/python3.7/dist-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
     53     ctx.ensure_initialized()
     54     tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 55                                         inputs, attrs, num_outputs)
     56   except core._NotOkStatusException as e:
     57     if name is not None:

UnimplementedError: Graph execution error:

Detected at node 'model/block1_conv1/Conv2D' defined at (most recent call last):
    File "/usr/lib/python3.7/runpy.py", line 193, in _run_module_as_main
      "__main__", mod_spec)
import os
from os.path import exists

import tensorflow as tf

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
img_shape = (128,128,3)
BATCH_SIZE = 1
IMG_SIZE = (128, 128)

database_buffer = "F:\\models\\buffer\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
database_buffer_dir = os.path.dirname(database_buffer)

if not exists(database_buffer_dir):
    os.makedirs(database_buffer_dir)  # makedirs also creates missing parent directories
    print("Create directory: " + database_buffer_dir)

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: DataSets
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
PATH = 'F:\\datasets\\downloads\\cats_name'
train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'validation')

train_dataset = tf.keras.utils.image_dataset_from_directory(train_dir,
                                                            shuffle=True,
                                                            batch_size=BATCH_SIZE,
                                                            image_size=IMG_SIZE)

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# load pretrained model
base_model = tf.keras.applications.VGG19(input_shape=img_shape, include_top=False, weights='imagenet')

# freezing the model
base_model.trainable = False

#define the custom head for network
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()(base_model.output)

# output / prediction layer
prediction_layer = tf.keras.layers.Dense(units=1, activation='sigmoid')(global_average_layer)

model = tf.keras.models.Model(inputs=base_model.input, outputs=prediction_layer)
model.summary()

# compile the model
opt = tf.keras.optimizers.RMSprop(learning_rate=0.0001)
model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit(train_dataset, epochs=50)  # batch_size must not be passed when fitting on a tf.data.Dataset
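
One hedged caveat about this replacement: unlike the ImageDataGenerator(rescale=1/255.0) pipeline it supersedes, image_dataset_from_directory does not rescale pixel values on its own. In recent TensorFlow versions a Rescaling layer can be mapped over the dataset, for example:

# assumes TF >= 2.6, where tf.keras.layers.Rescaling is available
normalization = tf.keras.layers.Rescaling(1./255)
train_dataset = train_dataset.map(lambda x, y: (normalization(x), y))
train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE)  # overlap the input pipeline with training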

Model.fit gives InvalidArgumentError: Graph execution error:

My code is as follows:

from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.preprocessing.image import ImageDataGenerator
#import os
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
model = Sequential()
model.add(Conv2D(16, (3, 3), input_shape=(32,32, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))


model.add(Dropout(0.5))
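# NOTE: there is no Flatten() between the conv/pool stack and the Dense layer
# below -- the corrected code further down adds it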
model.add(Dense(27))
model.add(Activation('sigmoid'))

model.compile(loss = 'categorical_crossentropy',
              optimizer = 'rmsprop',
              metrics = ['accuracy'])

batch_size = 5

# Training Augmentation configuration

train_datagen = ImageDataGenerator(rescale = 1./255, 
                                   shear_range = 0.2,
                                   zoom_range = 0.2,
                                   horizontal_flip = False)

# Testing Augmentation - Only Rescaling
test_datagen = ImageDataGenerator(rescale = 1./255)

# Generates batches of Augmented Image data
train_generator = train_datagen.flow_from_directory('D:/college_project/resources/training/', 
                                                    target_size = (64, 64), 
                                                    batch_size = batch_size,
                                                    class_mode = 'categorical') 

# Generator for validation data
validation_generator = test_datagen.flow_from_directory('D:/college_project/resources/testing/', 
                                                        target_size = (64, 64),
                                                        batch_size = batch_size,
                                                        class_mode = 'categorical')

# Fit the model on Training data

model.fit(train_generator, epochs=5, validation_data=validation_generator)


# Evaluating model performance on Testing data
loss, accuracy = model.evaluate(validation_generator)

print("nModel's Evaluation Metrics: ")
print("---------------------------")
print("Accuracy: {} nLoss: {}".format(accuracy, loss))```
Traceback (most recent call last):

  File "D:college_projectmodulestraing example.py", line 56, in <module>
    `model.fit(train_generator, epochs=5, validation_data=validation_generator)`

  File "C:Usersshubhanaconda3libsite-packageskerasutilstraceback_utils.py", line 67, in error_handler
    raise e.with_traceback(filtered_tb) from None

  File "C:Usersshubhanaconda3libsite-packagestensorflowpythoneagerexecute.py", line 54, in quick_execute
    tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,

InvalidArgumentError: Graph execution error:
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.preprocessing.image import ImageDataGenerator


flowers = tf.keras.utils.get_file(
    'flower_photos',
    'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
    untar=True)

train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale = 1./255, 
                                   shear_range = 0.2,
                                   zoom_range = 0.2,
                                   horizontal_flip = False)


train_generator = train_datagen.flow_from_directory(directory = flowers,
                                               batch_size = 32,
                                               target_size = (32, 32),
                                               seed = 42, class_mode='categorical')

model = Sequential()
model.add(Conv2D(16, (3, 3), input_shape=(32,32, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

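# Flatten() bridges the 4-D conv output to the 2-D input Dense expects;
# its absence is what broke the original code above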
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(5))
model.add(Activation('softmax'))

model.compile(loss = 'categorical_crossentropy',
              optimizer = 'rmsprop',
              metrics = ['accuracy'])

model.fit(train_generator, epochs=5)
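
As a hedged aside on why Dense(5) works here: flow_from_directory infers one class per sub-folder, and the flower_photos archive unpacks into five class folders, so the softmax output and categorical_crossentropy line up. A quick check:

print(train_generator.class_indices)  # expect the 5 flower sub-folders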

mlmd.errors.InvalidArgumentError

mlmd.errors.InvalidArgumentError(
    message
)
import tensorflow as tf  
from tensorflow.keras import layers  

class Evaluation:

    def __init__(self, strategy=None):  
        
        # prepare for encoded img data
        self.strategy = strategy
        H, W, C = 10, 10, 3
        imgs = tf.cast(tf.zeros([8, H, W, C]), tf.uint8)
        encodes = []
        for img in imgs:
            encode = tf.io.encode_jpeg(img)
            encodes.append(encode)
        encodes = tf.stack(encodes, axis = 0) 
        
        # convert encoded img data to tf.data
        self.dataset = tf.data.Dataset.from_tensor_slices(encodes)
        self.dataset = self.dataset.batch(2)
        self.dataset = self.strategy.experimental_distribute_dataset(self.dataset)
        with self.strategy.scope():
            self.conv = layers.Conv2D(32, (1, 1), strides=(1, 1), padding='same')
        self.parallel_iterations = 10
    
    def preprocess(self, encoded):
        # preprocess for tf.data
        image = tf.io.decode_jpeg(encoded, channels=3)
        image = tf.image.resize(image, [20,20])
        return image

    @tf.function
    def serving(self, inputs):
        # data preprocess
        image = tf.map_fn(self.preprocess,
                          inputs,
                          fn_output_signature=tf.float32,
                          parallel_iterations=self.parallel_iterations)
        
        # inference for each batch
        prediction = self.conv(image)
        return prediction

    @tf.function
    def infer(self, serve_summary_writer):
        # inference for all batches
        with serve_summary_writer.as_default():
            batch = tf.cast(0, tf.int64)
            for data in self.dataset:
                prediction_perReplica = self.strategy.run(self.serving, args=(data,))
                prediction_tensor = prediction_perReplica.values
                prediction_concat = tf.concat(prediction_tensor, axis = 0)
                tf.summary.write(tag="prediction", tensor=prediction_concat, step=batch)
                batch += 1
                
    def eval(self):
        serve_summary_writer = tf.summary.create_file_writer('save_file', max_queue=100000, flush_millis=100000)
        self.infer(serve_summary_writer)
        serve_summary_writer.close()
        tf.io.gfile.rmtree('save_file')  

if __name__ == "__main__":

    strategy = tf.distribute.MirroredStrategy()
    e = Evaluation(strategy)   
    e.eval()
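
A likely culprit, offered as an assumption rather than a diagnosis: when MirroredStrategy finds only one device, strategy.run returns a plain tensor instead of a PerReplica object, so prediction_perReplica.values is not available. tf.distribute.Strategy.experimental_local_results handles both cases; a minimal sketch of the loop body in infer:

prediction_perReplica = self.strategy.run(self.serving, args=(data,))
# experimental_local_results returns a tuple of per-device tensors in both the
# single-replica and multi-replica cases, unlike the PerReplica-only `.values`
prediction_tensor = self.strategy.experimental_local_results(prediction_perReplica)
prediction_concat = tf.concat(prediction_tensor, axis=0)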

TensorFlow: InvalidArgumentError: Graph execution error:

Answer #1. The input_dim of the Embedding layer has to correspond to the size of your data’s vocabulary + 1. Also, your labels should begin from zero and not from one when using the sparse_categorical_crossentropy loss function. Here is …

import csv
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import tensorflow as tf
from tensorflow.keras.layers import Input,Dense,LSTM,Flatten,GlobalAveragePooling1D,Embedding,Dropout

!wget --no-check-certificate \
    https://storage.googleapis.com/laurencemoroney-blog.appspot.com/bbc-text.csv \
    -O /tmp/bbc-text.csv



# Stopwords list from https://github.com/Yoast/YoastSEO.js/blob/develop/src/config/stopwords.js
# Convert it to a Python list and paste it here
stopwords = ["a", "about", "above", "after", "again", "against", "all", "am", "an", "and", "any", "are", "as", "at",
             "be", "because", "been", "before", "being", "below", "between", "both", "but", "by", "could", "did", "do",
             "does", "doing", "down", "during", "each", "few", "for", "from", "further", "had", "has", "have", "having",
             "he", "he'd", "he'll", "he's", "her", "here", "here's", "hers", "herself", "him", "himself", "his", "how",
             "how's", "i", "i'd", "i'll", "i'm", "i've", "if", "in", "into", "is", "it", "it's", "its", "itself",
             "let's", "me", "more", "most", "my", "myself", "nor", "of", "on", "once", "only", "or", "other", "ought",
             "our", "ours", "ourselves", "out", "over", "own", "same", "she", "she'd", "she'll", "she's", "should",
             "so", "some", "such", "than", "that", "that's", "the", "their", "theirs", "them", "themselves", "then",
             "there", "there's", "these", "they", "they'd", "they'll", "they're", "they've", "this", "those", "through",
             "to", "too", "under", "until", "up", "very", "was", "we", "we'd", "we'll", "we're", "we've", "were",
             "what", "what's", "when", "when's", "where", "where's", "which", "while", "who", "who's", "whom", "why",
             "why's", "with", "would", "you", "you'd", "you'll", "you're", "you've", "your", "yours", "yourself",
             "yourselves"]

#----------------------------------- Read from CSV and remove the stopwords
sentences = []
labels = []
with open("/tmp/bbc-text.csv", 'r') as csvfile:
    reader = csv.reader(csvfile, delimiter=',')
    next(reader)
    for row in reader:
        labels.append(row[0])
        sentence = row[1]
        for word in stopwords:
            token = " " + word + " "
            sentence = sentence.replace(token, " ")
            sentence = sentence.replace(" ", " ")
        sentences.append(sentence)


#----------------------------------  Tokenize sentences
tokenizer = Tokenizer(oov_token="<OOV>")
tokenizer.fit_on_texts(sentences)
sequences = tokenizer.texts_to_sequences(sentences)
padded = pad_sequences(sequences, padding = 'post')

#--------------------------------- Tokenize labels
label_tokenizer = Tokenizer()
label_tokenizer.fit_on_texts(labels)
# label_word_index = label_tokenizer.word_index
label_seq = label_tokenizer.texts_to_sequences(labels)
train_sentence = tf.convert_to_tensor(padded,tf.int32)
train_label = tf.convert_to_tensor(label_seq,tf.int32)

input = Input(shape=(2441,))
x = Embedding(input_dim=10000,output_dim=128)(input)
x = LSTM(64,return_sequences=True)(x)
x = LSTM(64,return_sequences=True)(x)
x = LSTM(64,return_sequences=True)(x)
x = Dropout(0.2)(x)
x = LSTM(64)(x)
x = Flatten()(x)
output = Dense(5, activation='softmax')(x)
model = tf.keras.models.Model(input,output)

model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

model.fit(x=train_sentence,y=train_label,epochs=10)
InvalidArgumentError: Graph execution error:
# ...
# ...
train_sentence = tf.convert_to_tensor(padded,tf.int32)
train_label = tf.convert_to_tensor(label_seq,tf.int32)
train_label = train_label - 1

input = Input(shape=(2441,))
x = Embedding(input_dim=len(tokenizer.word_index) + 1,output_dim=128)(input)
x = LSTM(64,return_sequences=True)(x)
x = LSTM(64,return_sequences=True)(x)
x = LSTM(64,return_sequences=True)(x)
x = Dropout(0.2)(x)
x = LSTM(64)(x)
x = Flatten()(x)
output = Dense(5, activation='softmax')(x)
model = tf.keras.models.Model(input,output)

model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

model.fit(x=train_sentence,y=train_label,epochs=10)

Unknown error: Graph execution error
#16425

UnknownError: Graph execution error: 2 root error(s) found. (0) UNKNOWN: UnidentifiedImageError: cannot identify image file <_io.BytesIO object at 0x7fc8d9912d70>

 [[{{node PyFunc}}]]
 [[IteratorGetNext]]
 [[IteratorGetNext/_2]]
 [[{{node PyFunc}}]]
 [[IteratorGetNext]]


I am getting this error and I don’t know the reason; please be kind and guide me on how I can resolve it. I have 2 classes in my dataset.

Epoch 1/2
---------------------------------------------------------------------------
InvalidArgumentError                      Traceback (most recent call last)
Input In [22], in <cell line: 9>()
      2 early_stopping=keras.callbacks.EarlyStopping(
      3     monitor="val_loss",
      4     patience=2,
      5     verbose=2
      6 )
      8 #Training
----> 9 history=model.fit(
     10     train_data,
     11     validation_data=val_data,
     12     callbacks=[early_stopping],
     13     epochs=2,
     14     verbose=2
     15 )

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\utils\traceback_utils.py:67, in filter_traceback.<locals>.error_handler(*args, **kwargs)
     65 except Exception as e:  # pylint: disable=broad-except
     66   filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67   raise e.with_traceback(filtered_tb) from None
     68 finally:
     69   del filtered_tb

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\tensorflow\python\eager\execute.py:54, in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
     52 try:
     53   ctx.ensure_initialized()
---> 54   tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
     55                                       inputs, attrs, num_outputs)
     56 except core._NotOkStatusException as e:
     57   if name is not None:

InvalidArgumentError: Graph execution error:

Detected at node 'categorical_crossentropy/softmax_cross_entropy_with_logits' defined at (most recent call last):
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\runpy.py", line 196, in _run_module_as_main
      return _run_code(code, main_globals, None,
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\runpy.py", line 86, in _run_code
      exec(code, run_globals)
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\ipykernel_launcher.py", line 17, in <module>
      app.launch_new_instance()
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\traitlets\config\application.py", line 846, in launch_instance
      app.start()
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\ipykernel\kernelapp.py", line 712, in start
      self.io_loop.start()
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\tornado\platform\asyncio.py", line 199, in start
      self.asyncio_loop.run_forever()
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\asyncio\base_events.py", line 600, in run_forever
      self._run_once()
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\asyncio\base_events.py", line 1896, in _run_once
      handle._run()
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\asyncio\events.py", line 80, in _run
      self._context.run(self._callback, *self._args)
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\ipykernel\kernelbase.py", line 504, in dispatch_queue
      await self.process_one()
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\ipykernel\kernelbase.py", line 493, in process_one
      await dispatch(*args)
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\ipykernel\kernelbase.py", line 400, in dispatch_shell
      await result
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\ipykernel\kernelbase.py", line 724, in execute_request
      reply_content = await reply_content
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\ipykernel\ipkernel.py", line 390, in do_execute
      res = shell.run_cell(code, store_history=store_history, silent=silent)
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\ipykernel\zmqshell.py", line 528, in run_cell
      return super().run_cell(*args, **kwargs)
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\IPython\core\interactiveshell.py", line 2863, in run_cell
      result = self._run_cell(
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\IPython\core\interactiveshell.py", line 2909, in _run_cell
      return runner(coro)
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\IPython\core\async_helpers.py", line 129, in _pseudo_sync_runner
      coro.send(None)
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\IPython\core\interactiveshell.py", line 3106, in run_cell_async
      has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\IPython\core\interactiveshell.py", line 3309, in run_ast_nodes
      if await self.run_code(code, result, async_=asy):
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\IPython\core\interactiveshell.py", line 3369, in run_code
      exec(code_obj, self.user_global_ns, self.user_ns)
    File "C:\Users\sidra\AppData\Local\Temp\ipykernel_11008\2530737387.py", line 9, in <cell line: 9>
      history=model.fit(
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\utils\traceback_utils.py", line 64, in error_handler
      return fn(*args, **kwargs)
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\engine\training.py", line 1409, in fit
      tmp_logs = self.train_function(iterator)
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\engine\training.py", line 1051, in train_function
      return step_function(self, iterator)
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\engine\training.py", line 1040, in step_function
      outputs = model.distribute_strategy.run(run_step, args=(data,))
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\engine\training.py", line 1030, in run_step
      outputs = model.train_step(data)
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\engine\training.py", line 890, in train_step
      loss = self.compute_loss(x, y, y_pred, sample_weight)
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\engine\training.py", line 948, in compute_loss
      return self.compiled_loss(
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\engine\compile_utils.py", line 201, in __call__
      loss_value = loss_obj(y_t, y_p, sample_weight=sw)
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\losses.py", line 139, in __call__
      losses = call_fn(y_true, y_pred)
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\losses.py", line 243, in call
      return ag_fn(y_true, y_pred, **self._fn_kwargs)
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\losses.py", line 1787, in categorical_crossentropy
      return backend.categorical_crossentropy(
    File "C:\Users\sidra\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\backend.py", line 5134, in categorical_crossentropy
      return tf.nn.softmax_cross_entropy_with_logits(
Node: 'categorical_crossentropy/softmax_cross_entropy_with_logits'
logits and labels must be broadcastable: logits_size=[64,4] labels_size=[64,2]
     [[{{node categorical_crossentropy/softmax_cross_entropy_with_logits}}]] [Op:__inference_train_function_2177]
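
The last line is the actual problem: the loss receives logits of shape [64, 4] (a final Dense layer with 4 units) but one-hot labels of shape [64, 2] (two classes in the dataset). The model definition is not shown, so this is only a hypothetical sketch: the width of the final layer just needs to match the number of classes.

# hypothetical final layer: its width must equal the number of classes (2 here)
model.add(keras.layers.Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])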

I’m having multiple errors while running this VGG training code (code and errors shown below). I don’t know if it’s because of my dataset or something else.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics.pairwise import cosine_similarity
import os
import scipy

train_directory = 'sign_data/train' #To be changed
test_directory = 'sign_data/test' #To be changed

train_datagen = ImageDataGenerator(
    rescale = 1./255,
    rotation_range = 0.1,
    width_shift_range = 0.2,
    height_shift_range = 0.2,
    shear_range = 0.1
)

train_generator = train_datagen.flow_from_directory(
    train_directory,
    target_size = (224, 224),
    color_mode = 'rgb',
    shuffle = True,
    batch_size=32
    
)


test_datagen = ImageDataGenerator(
    rescale = 1./255,
)

test_generator = test_datagen.flow_from_directory(
    test_directory,
    target_size = (224, 224),
    color_mode = 'rgb',
    shuffle = True,
    batch_size=32
)

from tensorflow.keras.applications.vgg16 import VGG16   
vgg_basemodel = VGG16(include_top=True)

from tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping

early_stopping = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=5)

vgg_model = tf.keras.Sequential(vgg_basemodel.layers[:-1])
vgg_model.add(tf.keras.layers.Dense(10, activation = 'softmax'))

# Freezing original layers
for layer in vgg_model.layers[:-1]:
    layer.trainable = False

vgg_model.compile(loss='categorical_crossentropy',
                  optimizer=tf.keras.optimizers.SGD(momentum=0.9, learning_rate=0.001, decay=0.01),
                  metrics=['accuracy'])

history = vgg_model.fit(train_generator,
              epochs=30,
              batch_size=64,
              validation_data=test_generator,
              callbacks=[early_stopping])

# finetuning with all layers set trainable

for layer in vgg_model.layers:
    layer.trainable = True

vgg_model.compile(loss='categorical_crossentropy',
                  optimizer=tf.keras.optimizers.SGD(momentum=0.9, learning_rate=0.0001),
                  metrics=['accuracy'])

history2 = vgg_model.fit(train_generator,
              epochs=5,
              batch_size=64,
              validation_data=test_generator,
              callbacks=[early_stopping])

vgg_model.save('saved_models/vgg_finetuned_model')

First error: Invalid Argument Error

    InvalidArgumentError                      Traceback (most recent call last)
<ipython-input-13-292bf57ef59f> in <module>()
     14               batch_size=64,
     15               validation_data=test_generator,
---> 16               callbacks=[early_stopping])
     17 
     18 # finetuning with all layers set trainable

    /usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py in error_handler(*args, **kwargs)
     65     except Exception as e:  # pylint: disable=broad-except
     66       filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67       raise e.with_traceback(filtered_tb) from None
     68     finally:
     69       del filtered_tb

/usr/local/lib/python3.7/dist-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
     53     ctx.ensure_initialized()
     54     tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 55                                         inputs, attrs, num_outputs)
     56   except core._NotOkStatusException as e:
     57     if name is not None:

Second Error: Graph Execution Error

    InvalidArgumentError: Graph execution error:
Detected at node 'categorical_crossentropy/softmax_cross_entropy_with_logits' defined at (most recent call last):
    File "/usr/lib/python3.7/runpy.py", line 193, in _run_module_as_main
      "__main__", mod_spec)
    File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
      exec(code, run_globals)
    File "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py", line 16, in <module>
      app.launch_new_instance()
    File "/usr/local/lib/python3.7/dist-packages/traitlets/config/application.py", line 846, in launch_instance
      app.start()
    File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelapp.py", line 499, in start
      self.io_loop.start()
    File "/usr/local/lib/python3.7/dist-packages/tornado/platform/asyncio.py", line 132, in start
      self.asyncio_loop.run_forever()
    File "/usr/lib/python3.7/asyncio/base_events.py", line 541, in run_forever
      self._run_once()
    File "/usr/lib/python3.7/asyncio/base_events.py", line 1786, in _run_once
      handle._run()
    File "/usr/lib/python3.7/asyncio/events.py", line 88, in _run
      self._context.run(self._callback, *self._args)
    File "/usr/local/lib/python3.7/dist-packages/tornado/platform/asyncio.py", line 122, in _handle_events
      handler_func(fileobj, events)
    File "/usr/local/lib/python3.7/dist-packages/tornado/stack_context.py", line 300, in null_wrapper
      return fn(*args, **kwargs)
    File "/usr/local/lib/python3.7/dist-packages/zmq/eventloop/zmqstream.py", line 452, in _handle_events
      self._handle_recv()
    File "/usr/local/lib/python3.7/dist-packages/zmq/eventloop/zmqstream.py", line 481, in _handle_recv
      self._run_callback(callback, msg)
    File "/usr/local/lib/python3.7/dist-packages/zmq/eventloop/zmqstream.py", line 431, in _run_callback
      callback(*args, **kwargs)
    File "/usr/local/lib/python3.7/dist-packages/tornado/stack_context.py", line 300, in null_wrapper
      return fn(*args, **kwargs)
    File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 283, in dispatcher
      return self.dispatch_shell(stream, msg)
    File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 233, in dispatch_shell
      handler(stream, idents, msg)
    File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 399, in execute_request
      user_expressions, allow_stdin)
    File "/usr/local/lib/python3.7/dist-packages/ipykernel/ipkernel.py", line 208, in do_execute
      res = shell.run_cell(code, store_history=store_history, silent=silent)
    File "/usr/local/lib/python3.7/dist-packages/ipykernel/zmqshell.py", line 537, in run_cell
      return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
    File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 2718, in run_cell
      interactivity=interactivity, compiler=compiler, result=result)
    File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 2822, in run_ast_nodes
      if self.run_code(code, result):
    File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 2882, in run_code
      exec(code_obj, self.user_global_ns, self.user_ns)
    File "<ipython-input-13-292bf57ef59f>", line 16, in <module>
      callbacks=[early_stopping])
    File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 64, in error_handler
      return fn(*args, **kwargs)
    File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1384, in fit
      tmp_logs = self.train_function(iterator)
    File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1021, in train_function
      return step_function(self, iterator)
    File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1010, in step_function
      outputs = model.distribute_strategy.run(run_step, args=(data,))
    File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1000, in run_step
      outputs = model.train_step(data)
    File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 860, in train_step
      loss = self.compute_loss(x, y, y_pred, sample_weight)
    File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 919, in compute_loss
      y, y_pred, sample_weight, regularization_losses=self.losses)
    File "/usr/local/lib/python3.7/dist-packages/keras/engine/compile_utils.py", line 201, in __call__
      loss_value = loss_obj(y_t, y_p, sample_weight=sw)
    File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 141, in __call__
      losses = call_fn(y_true, y_pred)
    File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 245, in call
      return ag_fn(y_true, y_pred, **self._fn_kwargs)
    File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 1790, in categorical_crossentropy
      y_true, y_pred, from_logits=from_logits, axis=axis)
    File "/usr/local/lib/python3.7/dist-packages/keras/backend.py", line 5099, in categorical_crossentropy
      labels=target, logits=output, axis=axis)
Node: 'categorical_crossentropy/softmax_cross_entropy_with_logits'
logits and labels must be broadcastable: logits_size=[32,10] labels_size=[32,128]
     [[{{node categorical_crossentropy/softmax_cross_entropy_with_logits}}]] [Op:__inference_train_function_11227]

I’m running this on Google Colaboratory. Is there a module that I should install? Or is it purely an error in the code itself?
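
The numbers point at the same kind of mismatch as in the previous post: the final Dense(10, ...) yields logits of width 10, while flow_from_directory evidently found 128 class folders under sign_data/train, so the one-hot labels have width 128. A minimal sketch of a fix, reading the class count off the generator instead of hard-coding it (assuming the generators from the question):

num_classes = train_generator.num_classes  # number of class folders found in sign_data/train
vgg_model = tf.keras.Sequential(vgg_basemodel.layers[:-1])
vgg_model.add(tf.keras.layers.Dense(num_classes, activation='softmax'))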
