diff --git a/deepexplain/tensorflow/methods.py b/deepexplain/tensorflow/methods.py
index 8301b25..a2996e7 100644
--- a/deepexplain/tensorflow/methods.py
+++ b/deepexplain/tensorflow/methods.py
@@ -73,7 +73,7 @@ def __init__(self, T, X, session, keras_learning_phase=None):
         self.Y_shape = [None,] + T.get_shape().as_list()[1:]
         # Most often T contains multiple output units. In this case, it is often necessary to select
         # a single unit to compute contributions for. This can be achieved passing 'ys' as weight for the output Tensor.
-        self.Y = tf.placeholder(tf.float32, self.Y_shape)
+        self.Y = tf.compat.v1.placeholder(tf.float32, self.Y_shape)
         # placeholder_from_data(ys) if ys is not None else 1.0  # Tensor that represents weights for T
         self.T = self.T * self.Y
         self.symbolic_attribution = None
@@ -373,7 +373,7 @@ def _init_references(self):
         sys.stdout.flush()
         self._deeplift_ref.clear()
         ops = []
-        g = tf.get_default_graph()
+        g = tf.compat.v1.get_default_graph()
         for op in g.get_operations():
             if len(op.inputs) > 0 and not op.name.startswith('gradients'):
                 if op.type in SUPPORTED_ACTIVATIONS:
@@ -556,7 +556,7 @@ def deepexplain_grad(op, grad):
 
 class DeepExplain(object):
 
-    def __init__(self, graph=None, session=tf.get_default_session()):
+    def __init__(self, graph=None, session=tf.compat.v1.get_default_session()):
         self.method = None
         self.batch_size = None
         self.session = session
@@ -635,7 +635,7 @@ def _check_ops(self):
         and needs to be passed in feed_dict.
         :return:
         """
-        g = tf.get_default_graph()
+        g = tf.compat.v1.get_default_graph()
         for op in g.get_operations():
             if len(op.inputs) > 0 and not op.name.startswith('gradients'):
                 if op.type in UNSUPPORTED_ACTIVATIONS:
@@ -644,7 +644,3 @@ def _check_ops(self):
                 elif 'keras_learning_phase' in op.name:
                     self.keras_phase_placeholder = op.outputs[0]
-
-
-
-
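
For reference, a minimal usage sketch of the patched module under TensorFlow 2.x in graph mode. The toy linear model and the x_test / y_test arrays are illustrative stand-ins, and the de.explain() call follows the signature shown in the project README (method name, target tensor, input tensor, input samples, ys); the exact arguments may differ between DeepExplain versions:

import numpy as np
import tensorflow as tf
from deepexplain.tensorflow import DeepExplain

# DeepExplain builds symbolic attributions on a graph, so graph mode is needed under TF 2.x.
tf.compat.v1.disable_eager_execution()

x_test = np.random.rand(16, 784).astype(np.float32)   # stand-in inputs
y_test = np.eye(10)[np.random.randint(0, 10, 16)]     # stand-in one-hot labels

with tf.compat.v1.Session() as sess:
    # Pass the session explicitly: the default argument
    # tf.compat.v1.get_default_session() is evaluated when the module is imported.
    with DeepExplain(session=sess) as de:
        X = tf.compat.v1.placeholder(tf.float32, [None, 784])
        W = tf.Variable(tf.zeros([784, 10]))
        b = tf.Variable(tf.zeros([10]))
        logits = tf.matmul(X, W) + b                   # toy linear model
        sess.run(tf.compat.v1.global_variables_initializer())
        # ys weights the output units so attributions target the labelled class,
        # mirroring the self.Y placeholder patched above.
        attributions = de.explain('grad*input', logits, X, x_test, ys=y_test)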