.. code:: python
%matplotlib inline
from d2l import mxnet as d2l
from mxnet import autograd, np, npx, gluon, init
from mxnet.gluon import nn
npx.set_np()
T = 1000 # Generate a total of 1000 points
# Time steps 1, 2, ..., T as float32 (float needed for sin and plotting)
time = np.arange(1, T + 1, dtype=np.float32)
# Synthetic sequence: a slow sine wave plus Gaussian noise with std 0.2
x = np.sin(0.01 * time) + np.random.normal(0, 0.2, (T,))
d2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))
.. figure:: output_sequence_ce248f_3_0.svg
.. raw:: html

.. raw:: html
.. code:: python
%matplotlib inline
from d2l import torch as d2l
import torch
import torch.nn as nn
T = 1000 # Generate a total of 1000 points
# Time steps 1, 2, ..., T as float32 (float needed for sin and plotting)
time = torch.arange(1, T + 1, dtype=torch.float32)
# Synthetic sequence: a slow sine wave plus Gaussian noise with std 0.2
x = torch.sin(0.01 * time) + torch.normal(0, 0.2, (T,))
d2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))
.. figure:: output_sequence_ce248f_6_0.svg
.. raw:: html

.. raw:: html
.. code:: python
%matplotlib inline
from d2l import tensorflow as d2l
import tensorflow as tf
T = 1000 # Generate a total of 1000 points
# Time steps 1, 2, ..., T as float32 (float needed for sin and plotting)
time = tf.range(1, T + 1, dtype=tf.float32)
# Synthetic sequence: a slow sine wave plus Gaussian noise with std 0.2
x = tf.sin(0.01 * time) + tf.random.normal([T], 0, 0.2)
d2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))
.. figure:: output_sequence_ce248f_9_0.svg
.. raw:: html

.. raw:: html
.. code:: python
tau = 4
# Each example consists of the `tau` most recent observations; the label is
# the observation that immediately follows them (an autoregressive setup).
features = np.zeros((T - tau, tau))
for i in range(tau):
    # Column `i` is x shifted by `i` time steps: row t holds x[t..t+tau-1]
    features[:, i] = x[i: T - tau + i]
labels = d2l.reshape(x[tau:], (-1, 1))
batch_size, n_train = 16, 600
# Only the first `n_train` examples are used for training
train_iter = d2l.load_array((features[:n_train], labels[:n_train]),
                            batch_size, is_train=True)
.. raw:: html

.. raw:: html
.. code:: python
tau = 4
# Each example consists of the `tau` most recent observations; the label is
# the observation that immediately follows them (an autoregressive setup).
features = torch.zeros((T - tau, tau))
for i in range(tau):
    # Column `i` is x shifted by `i` time steps: row t holds x[t..t+tau-1]
    features[:, i] = x[i: T - tau + i]
labels = d2l.reshape(x[tau:], (-1, 1))
batch_size, n_train = 16, 600
# Only the first `n_train` examples are used for training
train_iter = d2l.load_array((features[:n_train], labels[:n_train]),
                            batch_size, is_train=True)
.. raw:: html

.. raw:: html
.. code:: python
tau = 4
# Each example consists of the `tau` most recent observations; the label is
# the observation that immediately follows them (an autoregressive setup).
# A `tf.Variable` is needed because plain tensors do not support item
# assignment; columns are filled via `.assign`.
features = tf.Variable(tf.zeros((T - tau, tau)))
for i in range(tau):
    # Column `i` is x shifted by `i` time steps: row t holds x[t..t+tau-1]
    features[:, i].assign(x[i: T - tau + i])
labels = tf.reshape(x[tau:], (-1, 1))
batch_size, n_train = 16, 600
# Only the first `n_train` examples are used for training
train_iter = d2l.load_array((features[:n_train], labels[:n_train]),
                            batch_size, is_train=True)
.. raw:: html

.. raw:: html
.. code:: python
# A simple MLP
def get_net():
net = nn.Sequential()
net.add(nn.Dense(10, activation='relu'),
nn.Dense(1))
net.initialize(init.Xavier())
return net
# Square loss
loss = gluon.loss.L2Loss()
.. raw:: html

.. raw:: html
.. code:: python
# Function for initializing the weights of the network
def init_weights(m):
    """Apply Xavier-uniform initialization to every `nn.Linear` module."""
    if type(m) == nn.Linear:
        torch.nn.init.xavier_uniform_(m.weight)

# A simple MLP
def get_net():
    """Return a freshly initialized two-layer MLP (4 -> 10 -> 1)."""
    net = nn.Sequential(nn.Linear(4, 10),
                        nn.ReLU(),
                        nn.Linear(10, 1))
    # `apply` walks every submodule, so both Linear layers get initialized
    net.apply(init_weights)
    return net

# Square loss
loss = nn.MSELoss()
.. raw:: html

.. raw:: html
.. code:: python
# Vanilla MLP architecture
def get_net():
net = tf.keras.Sequential([tf.keras.layers.Dense(10, activation='relu'),
tf.keras.layers.Dense(1)])
return net
# Least mean squares loss
# Note: L2 Loss = 1/2 * MSE Loss. TensorFlow has MSE Loss that is slightly
# different from MXNet's L2Loss by a factor of 2. Hence we halve the loss
# value to get L2Loss in TF
loss = tf.keras.losses.MeanSquaredError()
.. raw:: html

.. raw:: html
.. code:: python
def train(net, train_iter, loss, epochs, lr):
    """Train `net` with Adam at learning rate `lr`, printing per-epoch loss."""
    trainer = gluon.Trainer(net.collect_params(), 'adam',
                            {'learning_rate': lr})
    for epoch in range(epochs):
        for X, y in train_iter:
            with autograd.record():
                l = loss(net(X), y)
            l.backward()
            # Gluon's step takes the batch size to normalize the gradient
            trainer.step(batch_size)
        print(f'epoch {epoch + 1}, '
              f'loss: {d2l.evaluate_loss(net, train_iter, loss):f}')

net = get_net()
train(net, train_iter, loss, 5, 0.01)
.. parsed-literal::
:class: output
epoch 1, loss: 0.037817
epoch 2, loss: 0.030563
epoch 3, loss: 0.028721
epoch 4, loss: 0.027197
epoch 5, loss: 0.026767
.. raw:: html

.. raw:: html
.. code:: python
def train(net, train_iter, loss, epochs, lr):
    """Train `net` with Adam at learning rate `lr`, printing per-epoch loss."""
    trainer = torch.optim.Adam(net.parameters(), lr)
    for epoch in range(epochs):
        for X, y in train_iter:
            # Gradients accumulate in PyTorch, so reset them every batch
            trainer.zero_grad()
            l = loss(net(X), y)
            l.backward()
            trainer.step()
        print(f'epoch {epoch + 1}, '
              f'loss: {d2l.evaluate_loss(net, train_iter, loss):f}')

net = get_net()
train(net, train_iter, loss, 5, 0.01)
.. parsed-literal::
:class: output
epoch 1, loss: 0.059564
epoch 2, loss: 0.052640
epoch 3, loss: 0.051464
epoch 4, loss: 0.050421
epoch 5, loss: 0.051026
.. raw:: html

.. raw:: html
.. code:: python
def train(net, train_iter, loss, epochs, lr):
    """Train `net` with Adam, printing per-epoch loss.

    NOTE(review): `lr` is accepted but not passed to the optimizer, so
    Keras' default learning rate is used — looks unintentional, but the
    printed outputs below were produced with this behavior.
    """
    trainer = tf.keras.optimizers.Adam()
    for epoch in range(epochs):
        for X, y in train_iter:
            with tf.GradientTape() as g:
                out = net(X)
                # Halve MSE to match MXNet's L2Loss (see the loss definition)
                l = loss(y, out) / 2
            params = net.trainable_variables
            grads = g.gradient(l, params)
            trainer.apply_gradients(zip(grads, params))
        print(f'epoch {epoch + 1}, '
              f'loss: {d2l.evaluate_loss(net, train_iter, loss):f}')

net = get_net()
train(net, train_iter, loss, 5, 0.01)
.. parsed-literal::
:class: output
epoch 1, loss: 0.340292
epoch 2, loss: 0.164495
epoch 3, loss: 0.100906
epoch 4, loss: 0.081101
epoch 5, loss: 0.075165
.. raw:: html

.. raw:: html
.. code:: python
# One-step-ahead predictions: each prediction uses `tau` observed values
onestep_preds = net(features)
d2l.plot([time, time[tau:]], [d2l.numpy(x), d2l.numpy(onestep_preds)], 'time',
'x', legend=['data', '1-step preds'], xlim=[1, 1000],
figsize=(6, 3))
.. figure:: output_sequence_ce248f_51_0.svg
.. raw:: html

.. raw:: html
.. code:: python
# One-step-ahead predictions: each prediction uses `tau` observed values
onestep_preds = net(features)
d2l.plot([time, time[tau:]], [d2l.numpy(x), d2l.numpy(onestep_preds)], 'time',
'x', legend=['data', '1-step preds'], xlim=[1, 1000],
figsize=(6, 3))
.. figure:: output_sequence_ce248f_54_0.svg
.. raw:: html

.. raw:: html
.. code:: python
# One-step-ahead predictions: each prediction uses `tau` observed values
onestep_preds = net(features)
d2l.plot([time, time[tau:]], [d2l.numpy(x), d2l.numpy(onestep_preds)], 'time',
'x', legend=['data', '1-step preds'], xlim=[1, 1000],
figsize=(6, 3))
.. figure:: output_sequence_ce248f_57_0.svg
.. raw:: html

.. raw:: html
.. code:: python
multistep_preds = np.zeros(T)
# Seed the first `n_train + tau` positions with the observed data; beyond
# that, predictions are fed back in as inputs (multistep extrapolation)
multistep_preds[: n_train + tau] = x[: n_train + tau]
for i in range(n_train + tau, T):
    multistep_preds[i] = d2l.reshape(net(
        multistep_preds[i - tau: i].reshape(1, -1)), 1)
d2l.plot([time, time[tau:], time[n_train + tau:]],
         [d2l.numpy(x), d2l.numpy(onestep_preds),
          d2l.numpy(multistep_preds[n_train + tau:])], 'time',
         'x', legend=['data', '1-step preds', 'multistep preds'],
         xlim=[1, 1000], figsize=(6, 3))
.. figure:: output_sequence_ce248f_63_0.svg
.. raw:: html

.. raw:: html
.. code:: python
multistep_preds = torch.zeros(T)
# Seed the first `n_train + tau` positions with the observed data; beyond
# that, predictions are fed back in as inputs (multistep extrapolation)
multistep_preds[: n_train + tau] = x[: n_train + tau]
for i in range(n_train + tau, T):
    multistep_preds[i] = d2l.reshape(net(
        multistep_preds[i - tau: i].reshape(1, -1)), 1)
d2l.plot([time, time[tau:], time[n_train + tau:]],
         [d2l.numpy(x), d2l.numpy(onestep_preds),
          d2l.numpy(multistep_preds[n_train + tau:])], 'time',
         'x', legend=['data', '1-step preds', 'multistep preds'],
         xlim=[1, 1000], figsize=(6, 3))
.. figure:: output_sequence_ce248f_66_0.svg
.. raw:: html

.. raw:: html
.. code:: python
# `tf.Variable` allows in-place `.assign` on individual positions
multistep_preds = tf.Variable(tf.zeros(T))
# Seed the first `n_train + tau` positions with the observed data; beyond
# that, predictions are fed back in as inputs (multistep extrapolation)
multistep_preds[:n_train + tau].assign(x[:n_train + tau])
for i in range(n_train + tau, T):
    multistep_preds[i].assign(tf.reshape(net(
        tf.reshape(multistep_preds[i - tau: i], (1, -1))), ()))
d2l.plot([time, time[tau:], time[n_train + tau:]],
         [d2l.numpy(x), d2l.numpy(onestep_preds),
          d2l.numpy(multistep_preds[n_train + tau:])], 'time',
         'x', legend=['data', '1-step preds', 'multistep preds'],
         xlim=[1, 1000], figsize=(6, 3))
.. figure:: output_sequence_ce248f_69_0.svg
.. raw:: html

.. raw:: html
.. code:: python
max_steps = 64
features = np.zeros((T - tau - max_steps + 1, tau + max_steps))
# Column `i` (`i` < `tau`) are observations from `x` for time steps from
# `i + 1` to `i + T - tau - max_steps + 1`
for i in range(tau):
    features[:, i] = x[i: i + T - tau - max_steps + 1].T
# Column `i` (`i` >= `tau`) are the (`i - tau + 1`)-step-ahead predictions for
# time steps from `i + 1` to `i + T - tau - max_steps + 1`
for i in range(tau, tau + max_steps):
    features[:, i] = d2l.reshape(net(features[:, i - tau: i]), -1)
steps = (1, 4, 16, 64)
d2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],
         [d2l.numpy(features[:, tau + i - 1]) for i in steps], 'time', 'x',
         legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],
         figsize=(6, 3))
.. figure:: output_sequence_ce248f_75_0.svg
.. raw:: html

.. raw:: html
.. code:: python
max_steps = 64
features = torch.zeros((T - tau - max_steps + 1, tau + max_steps))
# Column `i` (`i` < `tau`) are observations from `x` for time steps from
# `i + 1` to `i + T - tau - max_steps + 1`
for i in range(tau):
    features[:, i] = x[i: i + T - tau - max_steps + 1].T
# Column `i` (`i` >= `tau`) are the (`i - tau + 1`)-step-ahead predictions for
# time steps from `i + 1` to `i + T - tau - max_steps + 1`
for i in range(tau, tau + max_steps):
    features[:, i] = d2l.reshape(net(features[:, i - tau: i]), -1)
steps = (1, 4, 16, 64)
d2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],
         [d2l.numpy(features[:, tau + i - 1]) for i in steps], 'time', 'x',
         legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],
         figsize=(6, 3))
.. figure:: output_sequence_ce248f_78_0.svg
.. raw:: html

.. raw:: html
.. code:: python
max_steps = 64
# `tf.Variable` allows column-wise `.assign`; plain tensors are immutable
features = tf.Variable(tf.zeros((T - tau - max_steps + 1, tau + max_steps)))
# Column `i` (`i` < `tau`) are observations from `x` for time steps from
# `i + 1` to `i + T - tau - max_steps + 1`
for i in range(tau):
    features[:, i].assign(x[i: i + T - tau - max_steps + 1].numpy().T)
# Column `i` (`i` >= `tau`) are the (`i - tau + 1`)-step-ahead predictions for
# time steps from `i + 1` to `i + T - tau - max_steps + 1`
for i in range(tau, tau + max_steps):
    features[:, i].assign(tf.reshape(net((features[:, i - tau: i])), -1))
steps = (1, 4, 16, 64)
d2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],
         [d2l.numpy(features[:, tau + i - 1]) for i in steps], 'time', 'x',
         legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],
         figsize=(6, 3))
.. figure:: output_sequence_ce248f_81_0.svg
.. raw:: html

.. raw:: html
`Discussions `__
.. raw:: html

.. raw:: html
`Discussions `__
.. raw:: html

.. raw:: html
`Discussions `__
.. raw:: html

.. raw:: html