
Planar flow
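A planar flow (Rezende & Mohamed, 2015) transforms a point $z \in \mathbb{R}^d$ as

$$f(z) = z + u \, h(w^\top z + b),$$

with learnable parameters $u, w \in \mathbb{R}^d$, $b \in \mathbb{R}$, and a smooth nonlinearity $h$ such as $\tanh$. By the matrix determinant lemma, the log-determinant of the Jacobian reduces to $\log\left|1 + u^\top h'(w^\top z + b)\, w\right|$, so a deep stack of such layers remains cheap to train. This notebook stacks $K$ planar layers on a diagonal Gaussian base distribution and fits the resulting model to a bimodal target by minimizing the reverse KL divergence.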

In [ ]:
# Import required packages
import torch
import numpy as np
import normflows as nf

from matplotlib import pyplot as plt
from tqdm import tqdm
In [ ]:
K = 16  # number of planar flow layers
# torch.manual_seed(0)

# Move model on GPU if available
enable_cuda = True
device = torch.device('cuda' if torch.cuda.is_available() and enable_cuda else 'cpu')

# Set up a chain of K planar flow layers
flows = []
for i in range(K):
    flows += [nf.flows.Planar((2,))]

# Bimodal target distribution and diagonal Gaussian base distribution
target = nf.distributions.TwoModes(2, 0.1)
q0 = nf.distributions.DiagGaussian(2)

nfm = nf.NormalizingFlow(q0=q0, flows=flows, p=target)
nfm.to(device)
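To make the transformation concrete, here is a minimal standalone sketch of what a single planar layer computes with $h = \tanh$. This is the textbook form, not the exact parameterization of nf.flows.Planar, which additionally constrains $u$ so the map stays invertible:

import torch

# Minimal planar transform f(z) = z + u * tanh(w^T z + b) and its log-det
def planar_forward(z, u, w, b):
    lin = torch.tanh(z @ w + b)                  # h(w^T z + b), shape (batch,)
    f = z + u * lin[:, None]                     # transformed points, (batch, d)
    psi = (1 - lin ** 2)[:, None] * w            # h'(w^T z + b) * w, (batch, d)
    log_det = torch.log(torch.abs(1 + psi @ u))  # log|det df/dz|, (batch,)
    return f, log_det

z_test = torch.randn(5, 2)
f, log_det = planar_forward(z_test, torch.randn(2), torch.randn(2), torch.randn(()))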
In [ ]:
# Plot target distribution
grid_size = 200
xx, yy = torch.meshgrid(torch.linspace(-3, 3, grid_size), torch.linspace(-3, 3, grid_size), indexing='ij')
z = torch.cat([xx.unsqueeze(2), yy.unsqueeze(2)], 2).view(-1, 2)
log_prob = target.log_prob(z.to(device)).cpu().view(*xx.shape)
prob = torch.exp(log_prob)

plt.figure(figsize=(10, 10))
plt.pcolormesh(xx, yy, prob)
plt.show()

# Plot initial flow distribution
z, _ = nfm.sample(num_samples=2 ** 20)
z_np = z.detach().cpu().numpy()
plt.figure(figsize=(10, 10))
plt.hist2d(z_np[:, 0], z_np[:, 1], (grid_size, grid_size), range=[[-3, 3], [-3, 3]])
plt.show()
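Before training, the second histogram is just the diagonal Gaussian base distribution pushed through randomly initialized planar layers, so it should look roughly unimodal and centered, quite unlike the two-mode target above.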
In [ ]:
# Train model
max_iter = 20000
num_samples = 2 * 20  # batch size per training step
anneal_iter = 10000
annealing = True
show_iter = 2000

loss_hist = np.array([])

optimizer = torch.optim.Adam(nfm.parameters(), lr=1e-3, weight_decay=1e-4)
for it in tqdm(range(max_iter)):
    optimizer.zero_grad()
    if annealing:
        # Anneal the weight of the log-target term from 0.01 up to 1
        loss = nfm.reverse_kld(num_samples, beta=np.min([1., 0.01 + it / anneal_iter]))
    else:
        loss = nfm.reverse_kld(num_samples)
    loss.backward()
    optimizer.step()

    loss_hist = np.append(loss_hist, loss.detach().cpu().numpy())

    # Plot learned distribution
    if (it + 1) % show_iter == 0:
        torch.cuda.manual_seed(0)  # fix the sampling seed so successive plots are comparable
        z, _ = nfm.sample(num_samples=2 ** 20)
        z_np = z.detach().cpu().numpy()

        plt.figure(figsize=(10, 10))
        plt.hist2d(z_np[:, 0], z_np[:, 1], (grid_size, grid_size), range=[[-3, 3], [-3, 3]])
        plt.show()

plt.figure(figsize=(10, 10))
plt.plot(loss_hist, label='loss')
plt.legend()
plt.show()
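The objective minimized here is the reverse KL divergence $\mathrm{KL}(q \,\|\, p) = \mathbb{E}_{z \sim q}[\log q(z) - \log p(z)]$, estimated with num_samples samples per step. With annealing enabled, the weight $\beta$ on the log-target term is ramped from 0.01 up to 1 over the first anneal_iter iterations, so the loss is (up to implementation details of reverse_kld) $\mathbb{E}_q[\log q(z)] - \beta \, \mathbb{E}_q[\log p(z)]$. Starting with a small $\beta$ keeps the model close to its broad initialization early on, which helps it cover both modes instead of collapsing onto one.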
In [ ]:
# Plot learned distribution
z, _ = nfm.sample(num_samples=2 ** 20)
z_np = z.detach().cpu().numpy()
plt.figure(figsize=(10, 10))
plt.hist2d(z_np[:, 0], z_np[:, 1], (grid_size, grid_size), range=[[-3, 3], [-3, 3]])
plt.show()
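For the curious, sampling from the model amounts to drawing from the base distribution and pushing the draws through each layer while accumulating log-determinants via the change-of-variables formula. A minimal sketch of this loop, assuming the usual normflows convention that the base distribution and flow modules return their outputs together with log densities / log-determinants:

with torch.no_grad():
    z, log_q = q0(16)            # base samples and their log density
    for flow in flows:
        z, log_det = flow(z)     # push samples through one planar layer
        log_q -= log_det         # change-of-variables correction
print(z.shape, log_q.shape)      # (16, 2), (16,)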
