Skip to content

Commit 129d8c8

Browse files
PyTorch load warning fix (#6)
* Added weights_only=True to all model-loading calls to fix the torch.load() warning
1 parent 4f1814e commit 129d8c8

File tree

2 files changed

+19
-10
lines changed

2 files changed

+19
-10
lines changed

paule/paule.py

Lines changed: 18 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -124,7 +124,8 @@ def __init__(self, *, pred_model=None, pred_optimizer=None, inv_model=None, inv_
124124
self.pred_model = ForwardModel(num_lstm_layers=1, hidden_size=720).double()
125125
self.pred_model.load_state_dict(
126126
torch.load(os.path.join(DIR, "pretrained_models/predictive/pred_model_common_voice_1_720_lr_0001_50_00001_50_000001_50_0000001_200.pt"),
127-
map_location=self.device))
127+
map_location=self.device,
128+
weights_only=True))
128129
# Non-Linear Perceptron PREDictive MODEL
129130
#self.pred_model = NonLinearModel(input_channel=30, output_channel=60,
130131
# mode="pred",
@@ -145,7 +146,8 @@ def __init__(self, *, pred_model=None, pred_optimizer=None, inv_model=None, inv_
145146
self.inv_model = InverseModelMelTimeSmoothResidual(num_lstm_layers=1, hidden_size=720).double()
146147
self.inv_model.load_state_dict(
147148
torch.load(os.path.join(DIR, "pretrained_models/inverse/inv_model_common_voice_3_1_720_5_lr_0001_50_00001_50_000001_50_0000001_200.pt"),
148-
map_location=self.device))
149+
map_location=self.device,
150+
weights_only=True))
149151
# Non-Linear Perceptron INVerse MODEL
150152
#self.inv_model = NonLinearModel(input_channel=60, output_channel=30,
151153
# mode="inv",
@@ -165,7 +167,8 @@ def __init__(self, *, pred_model=None, pred_optimizer=None, inv_model=None, inv_
165167
self.embedder = EmbeddingModel(num_lstm_layers=2, hidden_size=720).double()
166168
self.embedder.load_state_dict(torch.load(
167169
os.path.join(DIR, "pretrained_models/embedder/embed_model_common_voice_syn_rec_2_720_0_dropout_07_noise_6e05_rmse_lr_00001_200.pt"),
168-
map_location=self.device))
170+
map_location=self.device,
171+
weights_only=True))
169172
self.embedder.eval()
170173
# Non-Linear Perceptron Embedder
171174
#self.embedder = NonLinearModel(input_channel=60, output_channel=300,
@@ -187,7 +190,8 @@ def __init__(self, *, pred_model=None, pred_optimizer=None, inv_model=None, inv_
187190
self.cp_gen_model = Generator().double()
188191
self.cp_gen_model.load_state_dict(torch.load(
189192
os.path.join(DIR, "pretrained_models/cp_gan/conditional_trained_cp_generator_whole_critic_it_5_10_20_40_80_100_415.pt"),
190-
map_location=self.device))
193+
map_location=self.device,
194+
weights_only=True))
191195
self.cp_gen_model = self.cp_gen_model.to(self.device)
192196
self.cp_gen_model.eval()
193197

@@ -198,7 +202,8 @@ def __init__(self, *, pred_model=None, pred_optimizer=None, inv_model=None, inv_
198202
self.mel_gen_model = Generator(output_size=60).double()
199203
self.mel_gen_model.load_state_dict(torch.load(
200204
os.path.join(DIR, "pretrained_models/mel_gan/conditional_trained_mel_generator_synthesized_critic_it_5_10_20_40_80_100_400.pt"),
201-
map_location=self.device))
205+
map_location=self.device,
206+
weights_only=True))
202207
self.mel_gen_model = self.mel_gen_model.to(self.device)
203208
self.mel_gen_model.eval()
204209

@@ -213,7 +218,8 @@ def __init__(self, *, pred_model=None, pred_optimizer=None, inv_model=None, inv_
213218
self.speech_classifier.load_state_dict(torch.load(
214219
os.path.join(DIR, "pretrained_models/speech_classifier/linear_model_rec_as_nonspeech.pt"),
215220
#os.path.join(DIR, "pretrained_models/speech_classifier/model_rec_as_nonspeech.pt"),
216-
map_location=self.device))
221+
map_location=self.device,
222+
weights_only=True))
217223
self.speech_classifier = self.speech_classifier.double()
218224
self.speech_classifier = self.speech_classifier.to(self.device)
219225
self.speech_classifier.eval()
@@ -231,7 +237,8 @@ def __init__(self, *, pred_model=None, pred_optimizer=None, inv_model=None, inv_
231237
apply_half_sequence=False).double()
232238
self.cp_tube_model.load_state_dict(torch.load(
233239
os.path.join(DIR, "pretrained_models/somatosensory/cp_to_tube_model_1_360_lr_0001_50_00001_100.pt"),
234-
map_location=self.device))
240+
map_location=self.device,
241+
weights_only=True))
235242
self.cp_tube_model = self.cp_tube_model.to(self.device)
236243

237244
# Tube-Mel Model (tube -> mel)
@@ -245,7 +252,8 @@ def __init__(self, *, pred_model=None, pred_optimizer=None, inv_model=None, inv_
245252
apply_half_sequence=True).double()
246253
self.tube_mel_model.load_state_dict(torch.load(
247254
os.path.join(DIR, "pretrained_models/somatosensory/tube_to_mel_model_1_360_lr_0001_50_00001_100.pt"),
248-
map_location=self.device))
255+
map_location=self.device,
256+
weights_only=True))
249257
self.tube_mel_model = self.tube_mel_model.to(self.device)
250258

251259
# Tube-Embedder Model (tube -> semvec)
@@ -259,7 +267,8 @@ def __init__(self, *, pred_model=None, pred_optimizer=None, inv_model=None, inv_
259267
post_upsampling_size=0).double()
260268
self.tube_embedder.load_state_dict(torch.load(
261269
os.path.join(DIR, "pretrained_models/somatosensory/tube_to_vector_model_2_720_0_dropout_07_noise_6e05_rmse_lr_00001_200.pt"),
262-
map_location=self.device))
270+
map_location=self.device,
271+
weights_only=True))
263272
self.tube_embedder = self.tube_embedder.to(self.device)
264273
self.tube_embedder.eval()
265274

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[tool.poetry]
22
name = "paule"
3-
version = "0.4.6"
3+
version = "0.4.7"
44
description = "paule implements the Predictive Articulatory speech synthesis model Utilizing Lexical Embeddings (PAULE), which is a control model for the articulatory speech synthesizer VocalTractLab (VTL)."
55

66
license = "GPLv3+"

0 commit comments

Comments (0)