kaknikhil commented on a change in pull request #393: DL: Add support for
reporting various metrics in fit/evaluate
URL: https://github.com/apache/madlib/pull/393#discussion_r286271295
##########
File path: src/ports/postgres/modules/deep_learning/madlib_keras.py_in
##########
@@ -638,21 +611,21 @@ def internal_keras_eval_transition(state, dependent_var, independent_var,
     SD = kwargs['SD']
     device_name = get_device_name_and_set_cuda_env(gpus_per_host,
                                                    current_seg_id)
-    agg_loss, agg_accuracy, agg_image_count = state
+    agg_loss, agg_metric, agg_image_count = state
     if not agg_image_count:
         if not is_platform_pg():
             set_keras_session(gpus_per_host, segments_per_host)
         model = model_from_json(model_architecture)
         model_shapes = madlib_keras_serializer.get_model_shapes(model)
-        _, _, _, model_weights = madlib_keras_serializer.deserialize_weights(
+        _, model_weights = madlib_keras_serializer.deserialize_weights(
             model_data, model_shapes)
         model.set_weights(model_weights)
         with K.tf.device(device_name):
             compile_model(model, compile_params)
         SD['segment_model'] = model
         # These should already be 0, but just in case make sure
-        agg_accuracy = 0
+        agg_metric = 0
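
For context on the renamed state variable, here is a minimal sketch of the aggregation pattern the eval transition follows (not MADlib's actual transition/final functions; the helper names and the plain-tuple state are illustrative assumptions): each buffer's loss and metric are weighted by its image count, and the final step divides the weighted sums back out.

def eval_transition(state, x_buffer, y_buffer, model):
    # Hypothetical plain-tuple state; MADlib serializes its state differently.
    agg_loss, agg_metric, agg_image_count = state
    image_count = len(x_buffer)
    # With exactly one metric compiled in, Keras' evaluate() returns [loss, metric].
    loss, metric = model.evaluate(x_buffer, y_buffer, verbose=0)
    agg_image_count += image_count
    agg_loss += loss * image_count        # weight each buffer by its size
    agg_metric += metric * image_count
    return agg_loss, agg_metric, agg_image_count

def eval_final(state):
    agg_loss, agg_metric, agg_image_count = state
    # Divide the weighted sums by the total image count to get dataset-level averages.
    return agg_loss / agg_image_count, agg_metric / agg_image_count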
Review comment:
Yeah, I think it's fine to let Keras fail for `metrics=['']`.
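
To illustrate the behavior being relied on here (a sketch assuming standalone Keras 2.x; the exact exception message varies by version), passing an empty metric name straight through to compile lets Keras raise the error itself, so no extra validation is needed on the MADlib side:

from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(1, input_dim=4)])
try:
    # Keras resolves metric names at compile time; an empty string is not a
    # known metric, so this is expected to raise a ValueError.
    model.compile(optimizer='sgd', loss='mse', metrics=[''])
except ValueError as err:
    print('Keras rejected the metric:', err)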