I am trying to optimize the parameters of a function, but the loss remains the same.
import torch
from pedalboard import Pedalboard, HighpassFilter

class BoardOptimizer(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # Initialize parameters with current values but make them trainable
        self.highpass_freq = torch.nn.Parameter(torch.tensor(100.0), requires_grad=True)

    def get_pedalboard(self):
        highpass_freq = torch.clamp(self.highpass_freq, 20, 500)
        board = Pedalboard([
            HighpassFilter(cutoff_frequency_hz=float(highpass_freq)),
        ])
        return board

    def forward(self, sample_audio, sample_rate):
        board = self.get_pedalboard()
        music = board.process(sample_audio, sample_rate)
        return music
model = BoardOptimizer()
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-6)
print(type(criterion), y.shape, y_pred.shape)
for t in range(2000):
    y_pred = torch.Tensor(model(x, 16000))
    loss = criterion(y_pred, y)
    if t % 100 == 99:
        print(t, loss.item())
    optimizer.zero_grad()
    loss.requires_grad = True
    loss.backward()
    optimizer.step()
99 0.011413631960749626
199 0.011413631960749626
299 0.011413631960749626
399 0.011413631960749626
I assumed the error was because gradients were not being tracked. I tried printing this:
for name, param in model.named_parameters():
    print(f"{name}: grad = {param.grad}")
but I kept getting:
highpass_freq: grad = None
1 Answer
The loss should already require a gradient as a result of the forward computation, so I would remove the loss.requires_grad = True line; setting it manually only hides the fact that the graph is broken. Also, try rewriting the first line of get_pedalboard:
def get_pedalboard(self):
    highpass_freq = self.highpass_freq.clamp(20, 500)
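Either way, note that the float(highpass_freq) call on the next line converts the clamped tensor into a plain Python number, which detaches it from the autograd graph. A minimal sketch of the difference (the variable names are just for illustration):

import torch

p = torch.nn.Parameter(torch.tensor(100.0))
c = p.clamp(20, 500)   # still a tensor, still connected to the graph
print(c.grad_fn)       # a ClampBackward node
f = float(c)           # a plain Python float: no grad_fn, gradients cannot flow through it
print(type(f))         # <class 'float'>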
Are you sure the
board = Pedalboard([
HighpassFilter(cutoff_frequency_hz=float(highpass_freq)),
])
works with PyTorch autograd?
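Pedalboard runs its DSP on NumPy arrays outside of PyTorch, so as far as I can tell its output is never connected to the autograd graph, which would explain both the constant loss and grad = None. A quick check on the model output (before it is wrapped in torch.Tensor):

y_pred = model(x, 16000)
print(type(y_pred))      # likely a numpy.ndarray, i.e. no autograd information at all
y_pred = torch.Tensor(model(x, 16000))
print(y_pred.grad_fn)    # None, so backward() has nothing to propagate to highpass_freq

If the cutoff frequency really has to receive a gradient, one option is to express the filter with differentiable PyTorch operations instead of Pedalboard. The following is only a sketch under assumptions I cannot verify from the question: it requires torchaudio, and only reasonably recent torchaudio versions accept a tensor cutoff frequency in highpass_biquad (older ones expect a plain float, which would break the graph again). The class name DifferentiableHighpass is made up for the example.

import torch
import torchaudio

class DifferentiableHighpass(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.highpass_freq = torch.nn.Parameter(torch.tensor(100.0))

    def forward(self, sample_audio, sample_rate):
        cutoff = self.highpass_freq.clamp(20, 500)
        # highpass_biquad is built from torch ops, so a tensor cutoff can stay in the graph
        return torchaudio.functional.highpass_biquad(sample_audio, sample_rate, cutoff)

model = DifferentiableHighpass()
x = torch.randn(1, 16000)   # dummy audio just to demonstrate gradient flow
y = torch.randn(1, 16000)
loss = torch.nn.functional.mse_loss(model(x, 16000), y)
loss.backward()
print(model.highpass_freq.grad)   # should no longer be None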