TensorFlow DeepDream Example Memory Leak?



























I've got a Jupyter notebook from the Google Colab Seedbank with a DeepDream example that iterates on the output image and zooms in after each iteration, creating a zooming effect. I've noticed that after many zooming steps my computer runs out of RAM. I've heard people say you shouldn't build the graph during iterations, but I'm not sure where in my code I'm doing that.
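For context, this is the anti-pattern I've heard warned about, as I understand it (a minimal standalone sketch assuming TensorFlow 1.x graph mode, not taken from my notebook): every `tf.*` op call adds a node to the graph, so calling one inside a Python loop grows the graph, and memory use, on every pass.

    import tensorflow as tf

    x = tf.placeholder(tf.float32, name='x')
    sess = tf.Session()
    for step in range(100):
        # BAD: tf.square() creates a brand-new graph node on every pass,
        # so the graph (and RAM) grows with each iteration.
        y = tf.square(x)
        print(sess.run(y, {x: 2.0}))
    # GOOD: build y = tf.square(x) once before the loop and only
    # call sess.run(y, ...) inside it.

Here's my setup code: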



    with open("pumpkinresized.jpg", 'rb') as f:
        file_contents = f.read()

    #from PIL import Image
    #img = Image.open('pumpkin.jpg')
    #new_img = img.resize((500,500))
    #new_img.save("pumpkinresized.jpg", "JPEG", optimize=True)
    #file_contents = new_img.read()

    from __future__ import print_function

    from io import BytesIO
    from IPython.display import clear_output, Image, display
    import numpy as np
    import PIL.Image
    import tensorflow as tf
    import os
    import zipfile
    import matplotlib.pyplot

    model_fn = 'tensorflow_inception_graph.pb'

    # creating TensorFlow session and loading the model
    graph = tf.Graph()
    sess = tf.InteractiveSession(graph=graph)
    with tf.gfile.FastGFile(model_fn, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    t_input = tf.placeholder(np.float32, name='input')  # define the input tensor
    imagenet_mean = 117.0
    t_preprocessed = tf.expand_dims(t_input - imagenet_mean, 0)
    tf.import_graph_def(graph_def, {'input': t_preprocessed})

    def T(layer):
        '''Helper for getting layer output tensor'''
        return graph.get_tensor_by_name("import/%s:0" % layer)


    def showarray(a, fmt='jpeg', i=0):
        a = np.uint8(np.clip(a, 0, 255))
        f = BytesIO()
        PIL.Image.fromarray(a).save(f, fmt)
        matplotlib.pyplot.imsave(str(i) + ".png", a)
        display(Image(data=f.getvalue()))

    img0 = sess.run(tf.image.decode_image(file_contents))
    showarray(img0)


    octave_n = 4
    octave_scale = 1.4
    iter_n = 10
    strength = 200

    # Helper function that uses TensorFlow to resize an image
    def resize(img, new_size):
        return sess.run(tf.image.resize_bilinear(img[np.newaxis, :], new_size))[0]

    # Apply gradients to an image in a series of tiles
    def calc_grad_tiled(img, t_grad, tile_size=256):
        '''Random shifts are applied to the image to blur tile boundaries over
        multiple iterations.'''
        h, w = img.shape[:2]
        sx, sy = np.random.randint(tile_size, size=2)
        # We randomly roll the image in x and y to avoid seams between tiles.
        img_shift = np.roll(np.roll(img, sx, 1), sy, 0)
        grad = np.zeros_like(img)
        for y in range(0, max(h - tile_size // 2, tile_size), tile_size):
            for x in range(0, max(w - tile_size // 2, tile_size), tile_size):
                sub = img_shift[y:y + tile_size, x:x + tile_size]
                g = sess.run(t_grad, {t_input: sub})
                grad[y:y + tile_size, x:x + tile_size] = g
        imggrad = np.roll(np.roll(grad, -sx, 1), -sy, 0)
        # Add the image gradient to the image and return the result
        return img + imggrad * (strength * 0.01 / (np.abs(imggrad).mean() + 1e-7))

    # Applies deepdream at multiple scales
    def render_deepdream(t_obj, input_img, show_steps=True):
        # Collapse the optimization objective to a single number (the loss)
        t_score = tf.reduce_mean(t_obj)
        # We need the gradient of the image with respect to the objective
        t_grad = tf.gradients(t_score, t_input)[0]

        # split the image into a number of octaves (laplacian pyramid)
        img = input_img
        octaves = []
        for i in range(octave_n - 1):
            lo = resize(img, np.int32(np.float32(img.shape[:2]) / octave_scale))
            octaves.append(img - resize(lo, img.shape[:2]))
            img = lo

        # generate details octave by octave
        for octave in range(octave_n):
            if octave > 0:
                hi = octaves[-octave]
                img = resize(img, hi.shape[:2]) + hi
            for i in range(iter_n):
                img = calc_grad_tiled(img, t_grad)
                if show_steps:
                    clear_output()
                    showarray(img)
        return img
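
Would hoisting the graph construction out of `render_deepdream` be the right direction? Here's a sketch of what I mean, relying on `T` and `t_input` defined above (`grad_cache` and `get_t_grad` are names I made up; `render_deepdream` would then take the precomputed gradient tensor instead of `t_obj`):

    # Sketch: build each layer's score/gradient tensors once and reuse them,
    # instead of calling tf.reduce_mean/tf.gradients on every frame.
    grad_cache = {}

    def get_t_grad(layer):
        '''Return a cached gradient tensor for the given layer name.'''
        if layer not in grad_cache:
            t_score = tf.reduce_mean(tf.square(T(layer)))
            grad_cache[layer] = tf.gradients(t_score, t_input)[0]
        return grad_cache[layer]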


And here's the loop:



layer = "mixed4d_3x3_bottleneck_pre_relu"  #@param ["mixed4d_3x3_bottleneck_pre_relu", "mixed3a", "mixed3b", "mixed4a", "mixed4c", "mixed5a"]
iter_n = 12 #@param {type:"slider", max: 50}
strength = 120 #@param {type:"slider", max: 1000}
zooming_steps = 500 #@param {type:"slider", max: 512}
zoom_factor = 1.1 #@param {type:"number"}

tf.get_default_graph().finalize

frame = img0
img_y, img_x, _ = img0.shape
for i in range(zooming_steps):
if i > 20:
layer = "mixed3a"
if i > 40:
layer = "mixed4a"

if i > 70:
layer = "mixed4d_3x3_bottleneck_pre_relu"

if i > 100:
layer = "mixed5a"

frame = render_deepdream(tf.square(T(layer)), frame, False)
clear_output()
showarray(frame,i=i)
print("iteration: " +str(i))
newsize = np.int32(np.float32(frame.shape[:2])*zoom_factor)
frame = resize(frame, newsize)
frame = frame[(newsize[0]-img_y)//2:(newsize[0]-img_y)//2+img_y,
(newsize[1]-img_x)//2:(newsize[1]-img_x)//2+img_x,:]


Any ideas what I can change to prevent the memory leak? Is there something in the loop that shouldn't be there?
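
In case it helps with diagnosing, would a check like this inside the zoom loop confirm where the growth happens? (A sketch using only the standard `Graph.as_graph_def()` API; `graph` is the session graph from the setup code.)

    # Sketch: if this count climbs every iteration, ops are being created
    # inside the loop -- candidates are tf.square(T(layer)) above, plus the
    # tf.reduce_mean / tf.gradients calls inside render_deepdream and the
    # tf.image.resize_bilinear call inside resize().
    print("graph nodes:", len(graph.as_graph_def().node))
    # Note: graph.finalize() (actually called, with parentheses) makes the
    # graph read-only, so the first op created afterwards raises an error
    # pointing at the culprit instead of silently leaking.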









