UnicodeDecodeError: 'utf-8' codec can't decode byte 0xXX in position XX: invalid continuation byte

INFO/LOGS:

UnicodeDecodeError                        Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_15316/4169674876.py in <module>
----> 1 tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
      2 
      3 # Evaluate Metrics.
      4 metrics = estimator.evaluate(input_fn=lambda: eval_input_fn(filepath=eval_data, example_parser=example_parser,
      5                                                             batch_size=batch_size))

~\miniconda3\lib\site-packages\tensorflow_estimator\python\estimator\training.py in train_and_evaluate(estimator, train_spec, eval_spec)
    502         '(with task id 0).  Given task id {}'.format(config.task_id))
    503 
--> 504   return executor.run()
    505 
    506 

~\miniconda3\lib\site-packages\tensorflow_estimator\python\estimator\training.py in run(self)
    643       tf.compat.v1.logging.info(
    644           'Running training and evaluation locally (non-distributed).')
--> 645       return self.run_local()
    646 
    647     # Distributed case.

~\miniconda3\lib\site-packages\tensorflow_estimator\python\estimator\training.py in run_local(self)
    740     saving_listeners = self._train_spec.saving_listeners + (listener_for_eval,)
    741 
--> 742     self._estimator.train(
    743         input_fn=self._train_spec.input_fn,
    744         max_steps=self._train_spec.max_steps,

~\miniconda3\lib\site-packages\tensorflow_estimator\python\estimator\estimator.py in train(self, input_fn, hooks, steps, max_steps, saving_listeners)
    358 
    359       saving_listeners = _check_listeners_type(saving_listeners)
--> 360       loss = self._train_model(input_fn, hooks, saving_listeners)
    361       logging.info('Loss for final step: %s.', loss)
    362       return self

~\miniconda3\lib\site-packages\tensorflow_estimator\python\estimator\estimator.py in _train_model(self, input_fn, hooks, saving_listeners)
   1184       return self._train_model_distributed(input_fn, hooks, saving_listeners)
   1185     else:
-> 1186       return self._train_model_default(input_fn, hooks, saving_listeners)
   1187 
   1188   def _train_model_default(self, input_fn, hooks, saving_listeners):

~\miniconda3\lib\site-packages\tensorflow_estimator\python\estimator\estimator.py in _train_model_default(self, input_fn, hooks, saving_listeners)
   1215                                            self.config)
   1216       global_step_tensor = tf.compat.v1.train.get_global_step(g)
-> 1217       return self._train_with_estimator_spec(estimator_spec, worker_hooks,
   1218                                              hooks, global_step_tensor,
   1219                                              saving_listeners)

~\miniconda3\lib\site-packages\tensorflow_estimator\python\estimator\estimator.py in _train_with_estimator_spec(self, estimator_spec, worker_hooks, hooks, global_step_tensor, saving_listeners)
   1510                   output_dir=self._config.model_dir))
   1511 
-> 1512     with training.MonitoredTrainingSession(
   1513         master=self._config.master,
   1514         is_chief=self._config.is_chief,

~\miniconda3\lib\site-packages\tensorflow\python\training\monitored_session.py in MonitoredTrainingSession(master, is_chief, checkpoint_dir, scaffold, hooks, chief_only_hooks, save_checkpoint_secs, save_summaries_steps, save_summaries_secs, config, stop_grace_period_secs, log_step_count_steps, max_wait_secs, save_checkpoint_steps, summary_dir, save_graph_def)
    607   if hooks:
    608     all_hooks.extend(hooks)
--> 609   return MonitoredSession(
    610       session_creator=session_creator,
    611       hooks=all_hooks,

~\miniconda3\lib\site-packages\tensorflow\python\training\monitored_session.py in __init__(self, session_creator, hooks, stop_grace_period_secs)
   1052                hooks=None,
   1053                stop_grace_period_secs=120):
-> 1054     super(MonitoredSession, self).__init__(
   1055         session_creator,
   1056         hooks,

~\miniconda3\lib\site-packages\tensorflow\python\training\monitored_session.py in __init__(self, session_creator, hooks, should_recover, stop_grace_period_secs)
    755         stop_grace_period_secs=stop_grace_period_secs)
    756     if should_recover:
--> 757       self._sess = _RecoverableSession(self._coordinated_creator)
    758     else:
    759       self._sess = self._coordinated_creator.create_session()

~\miniconda3\lib\site-packages\tensorflow\python\training\monitored_session.py in __init__(self, sess_creator)
   1261     """
   1262     self._sess_creator = sess_creator
-> 1263     _WrappedSession.__init__(self, self._create_session())
   1264 
   1265   def _create_session(self):

~\miniconda3\lib\site-packages\tensorflow\python\training\monitored_session.py in _create_session(self)
   1266     while True:
   1267       try:
-> 1268         return self._sess_creator.create_session()
   1269       except _PREEMPTION_ERRORS as e:
   1270         logging.info(

~\miniconda3\lib\site-packages\tensorflow\python\training\monitored_session.py in create_session(self)
    915       # Inform the hooks that a new session has been created.
    916       for hook in self._hooks:
--> 917         hook.after_create_session(self.tf_sess, self.coord)
    918       return _CoordinatedSession(
    919           _HookedSession(self.tf_sess, self._hooks), self.coord,

~\miniconda3\lib\site-packages\tensorflow\python\training\basic_session_run_hooks.py in after_create_session(self, session, coord)
    600     self._summary_writer.add_meta_graph(meta_graph_def)
    601     # The checkpoint saved here is the state at step "global_step".
--> 602     self._save(session, global_step)
    603     self._timer.update_last_triggered_step(global_step)
    604 

~\miniconda3\lib\site-packages\tensorflow\python\training\basic_session_run_hooks.py in _save(self, session, step)
    632 
    633     logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
--> 634     self._get_saver().save(session, self._save_path, global_step=step,
    635                            write_meta_graph=self._save_graph_def)
    636     self._summary_writer.add_session_log(

~\miniconda3\lib\site-packages\tensorflow\python\training\saver.py in save(self, sess, save_path, global_step, latest_filename, meta_graph_suffix, write_meta_graph, write_state, strip_default_attrs, save_debug_info)
   1270           model_checkpoint_path = self.saver_def.save_tensor_name
   1271         else:
-> 1272           model_checkpoint_path = sess.run(
   1273               self.saver_def.save_tensor_name,
   1274               {self.saver_def.filename_tensor_name: checkpoint_file})

~\miniconda3\lib\site-packages\tensorflow\python\client\session.py in run(self, fetches, feed_dict, options, run_metadata)
    965 
    966     try:
--> 967       result = self._run(None, fetches, feed_dict, options_ptr,
    968                          run_metadata_ptr)
    969       if run_metadata:

~\miniconda3\lib\site-packages\tensorflow\python\client\session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
   1188     # or if the call is a partial run that specifies feeds.
   1189     if final_fetches or final_targets or (handle and feed_dict_tensor):
-> 1190       results = self._do_run(handle, final_targets, final_fetches,
   1191                              feed_dict_tensor, options, run_metadata)
   1192     else:

~\miniconda3\lib\site-packages\tensorflow\python\client\session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
   1368 
   1369     if handle is None:
-> 1370       return self._do_call(_run_fn, feeds, fetches, targets, options,
   1371                            run_metadata)
   1372     else:

~\miniconda3\lib\site-packages\tensorflow\python\client\session.py in _do_call(self, fn, *args)
   1375   def _do_call(self, fn, *args):
   1376     try:
-> 1377       return fn(*args)
   1378     except errors.OpError as e:
   1379       message = compat.as_text(e.message)

~\miniconda3\lib\site-packages\tensorflow\python\client\session.py in _run_fn(feed_dict, fetch_list, target_list, options, run_metadata)
   1358       # Ensure any changes to the graph are reflected in the runtime.
   1359       self._extend_graph()
-> 1360       return self._call_tf_sessionrun(options, feed_dict, fetch_list,
   1361                                       target_list, run_metadata)
   1362 

~\miniconda3\lib\site-packages\tensorflow\python\client\session.py in _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list, run_metadata)
   1451   def _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list,
   1452                           run_metadata):
-> 1453     return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
   1454                                             fetch_list, target_list,
   1455                                             run_metadata)

UnicodeDecodeError: 'utf-8' codec can't decode byte 0xd5 in position 143: invalid continuation byte
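
For context on what the message itself means: Python raises "invalid continuation byte" whenever it is asked to decode bytes that are not valid UTF-8, which fits the finding below that the checkpoint path handed to the saver was the culprit. A minimal, hypothetical reproduction (the byte 0xd5 and its position here are illustrative, not taken from the real path):

# Hypothetical bytes: 0xd5 opens a two-byte UTF-8 sequence, so the next byte
# must be a continuation byte (0x80-0xBF); the backslash (0x5c) that follows is not.
raw = b"model\xd5\\model"
try:
    raw.decode("utf-8")
except UnicodeDecodeError as e:
    print(e)  # 'utf-8' codec can't decode byte 0xd5 in position 5: invalid continuation byte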

The error was caused by this line:
saver.save(sess, "./model")
Because I'm on Windows, this path wasn't handled correctly, so I changed it to:
saver.save(sess, "model\model")
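
Below is a minimal, self-contained sketch of the corrected call. It is an assumption-laden illustration, not the original training script: the single-variable graph, the tf.compat.v1 session setup, and the use of os.path.join are all mine; only the "model\model" target mirrors the fix above.

import os
import tensorflow as tf

tf.compat.v1.disable_eager_execution()

# Made-up minimal graph so the snippet runs on its own.
v = tf.compat.v1.get_variable("v", shape=[1])
saver = tf.compat.v1.train.Saver()

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    # Ensure the checkpoint directory exists, then build the path with
    # os.path.join, which yields "model\model" on Windows.
    os.makedirs("model", exist_ok=True)
    save_path = os.path.join("model", "model")
    saver.save(sess, save_path)

If you write the backslash by hand instead, a raw string r"model\model" (or forward slashes, "model/model", which also work on Windows) avoids any chance of the backslash being read as an escape sequence.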

Reposted from blog.csdn.net/wenyisir/article/details/129668165