RNN Classification Models¶
This example shows how to apply RNN models in deep-river, with and without an incremental class adaptation strategy.
In [1]:
from deep_river.classification import RollingClassifierInitialized
from river import metrics, preprocessing, datasets
import torch
RNN Model¶
In [2]:
class RnnModule(torch.nn.Module):
    def __init__(self, n_features, hidden_size=16, num_layers=1):
        super().__init__()
        self.num_layers = num_layers
        self.n_features = n_features
        self.hidden_size = hidden_size
        self.rnn = torch.nn.RNN(
            input_size=n_features,
            hidden_size=hidden_size,
            num_layers=num_layers,
        )
        self.linear = torch.nn.Linear(hidden_size, 2)

    def forward(self, X, **kwargs):
        out, hn = self.rnn(X)
        hn = hn[-1]  # take the hidden state of the last layer
        out = self.linear(hn)
        return torch.nn.functional.softmax(out, dim=-1)  # return class probabilities
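Before wiring the module into a pipeline, a quick unbatched forward pass confirms the expected shapes (a minimal sketch; the sequence length of 20 matches the window_size used below):

module = RnnModule(n_features=31, hidden_size=16, num_layers=2)
X = torch.randn(20, 31)  # (seq_len, n_features): one rolling window, unbatched
probs = module(X)
print(probs.shape)  # torch.Size([2]) -- one probability per class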
Classification without incremental class adaptation strategy¶
In [3]:
dataset = datasets.Keystroke()
metric = metrics.Accuracy()
optimizer_fn = torch.optim.SGD

model_pipeline = preprocessing.StandardScaler()
model_pipeline |= RollingClassifierInitialized(
    module=RnnModule(n_features=31, hidden_size=16, num_layers=2),
    loss_fn="binary_cross_entropy",
    optimizer_fn=optimizer_fn,
    window_size=20,
    lr=1e-2,
    append_predict=True,
    is_class_incremental=False,
)
model_pipeline
Out[3]:
StandardScaler (
  with_std=True
)
RollingClassifierInitialized (
  module=RnnModule(
    (rnn): RNN(31, 16, num_layers=2)
    (linear): Linear(in_features=16, out_features=2, bias=True)
  )
  loss_fn="binary_cross_entropy"
  optimizer_fn=<class 'torch.optim.sgd.SGD'>
  lr=0.01
  output_is_logit=True
  is_class_incremental=False
  is_feature_incremental=False
  device="cpu"
  seed=42
  window_size=20
  append_predict=True
)
In [4]:
for x, y in dataset:
    y_pred = model_pipeline.predict_one(x)  # make a prediction
    metric.update(y, y_pred)  # update the metric
    model_pipeline.learn_one(x, y)  # make the model learn
print(f"Accuracy: {metric.get():.2f}")
Accuracy: 0.04
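This accuracy is close to chance: Keystroke is a multi-class dataset (51 typists), while the module above has a fixed two-unit output layer and class adaptation is disabled, so at most two labels can ever be predicted. A minimal sketch of the class-incremental variant, which lets deep-river expand the output layer as unseen labels arrive (reusing the RnnModule from above; the resulting accuracy will differ from the run above):

model_pipeline = preprocessing.StandardScaler()
model_pipeline |= RollingClassifierInitialized(
    module=RnnModule(n_features=31, hidden_size=16, num_layers=2),
    loss_fn="binary_cross_entropy",
    optimizer_fn=torch.optim.SGD,
    window_size=20,
    lr=1e-2,
    append_predict=True,
    is_class_incremental=True,  # grow the output layer when a new class appears
)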
LSTM Model¶
In [5]:
class LSTMModule(torch.nn.Module):
    def __init__(self, n_features, hidden_size=4):
        super().__init__()
        self.n_features = n_features
        self.hidden_size = hidden_size
        self.lstm = torch.nn.LSTM(
            input_size=n_features, hidden_size=hidden_size, num_layers=1
        )
        self.linear = torch.nn.Linear(hidden_size, 2)

    def forward(self, X, **kwargs):
        # LSTM returns the full output sequence plus the final hidden and cell states
        output, (hn, cn) = self.lstm(X)
        x = hn.view(-1, self.hidden_size)  # final hidden state; with num_layers=1 this equals hn[-1]
        x = self.linear(x)
        return torch.nn.functional.softmax(x, dim=-1)
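As with the RNN, a quick unbatched pass shows what the module returns (illustrative only):

module = LSTMModule(n_features=31, hidden_size=4)
X = torch.randn(20, 31)  # (seq_len, n_features), unbatched
probs = module(X)
print(probs.shape)  # torch.Size([1, 2]) -- hn.view keeps a leading dimension of size num_layers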
Classification without incremental class adaptation strategy¶
In [6]:
dataset = datasets.Keystroke()
metric = metrics.Accuracy()
optimizer_fn = torch.optim.SGD

model_pipeline = preprocessing.StandardScaler()
model_pipeline |= RollingClassifierInitialized(
    module=LSTMModule(n_features=31, hidden_size=4),
    loss_fn="binary_cross_entropy",
    optimizer_fn=optimizer_fn,
    window_size=20,
    lr=1e-2,
    append_predict=True,
)
model_pipeline
Out[6]:
StandardScaler (
  with_std=True
)
RollingClassifierInitialized (
  module=LSTMModule(
    (lstm): LSTM(31, 4)
    (linear): Linear(in_features=4, out_features=2, bias=True)
  )
  loss_fn="binary_cross_entropy"
  optimizer_fn=<class 'torch.optim.sgd.SGD'>
  lr=0.01
  output_is_logit=True
  is_class_incremental=False
  is_feature_incremental=False
  device="cpu"
  seed=42
  window_size=20
  append_predict=True
)
In [7]:
for x, y in dataset:
    y_pred = model_pipeline.predict_one(x)  # make a prediction
    metric.update(y, y_pred)  # update the metric
    model_pipeline.learn_one(x, y)  # make the model learn
print(f"Accuracy: {metric.get():.2f}")
Accuracy: 0.03
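The manual predict/update/learn loop used in this notebook is river's standard progressive validation; the same evaluation can also be written with river's helper (a sketch, equivalent up to the printed formatting):

from river import evaluate

dataset = datasets.Keystroke()
model = preprocessing.StandardScaler() | RollingClassifierInitialized(
    module=LSTMModule(n_features=31, hidden_size=4),
    loss_fn="binary_cross_entropy",
    optimizer_fn=torch.optim.SGD,
    window_size=20,
    lr=1e-2,
)
print(evaluate.progressive_val_score(dataset, model, metrics.Accuracy()))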