
Commit

Fix minor bugs to get recipe running.
kauterry committed Aug 26, 2024
1 parent 6552824 commit 48f2bdc
Showing 2 changed files with 19 additions and 2 deletions.
4 changes: 4 additions & 0 deletions src/fairseq2/recipes/wav2vec2/asr/train.py
@@ -225,6 +225,8 @@ def _large_10h() -> Wav2Vec2AsrTrainConfig:

config = _base_10h()

assert isinstance(config.optimizer_config, AdamWConfig)

config.model_arch = "large_10h"
config.model_config = model_config
config.pretrained_model = "wav2vec2_large"
@@ -260,6 +262,8 @@ def _large_100h() -> Wav2Vec2AsrTrainConfig:

config = _base_10h()

assert isinstance(config.optimizer_config, AdamWConfig)

config.dataset = "librispeech_asr_100h"
config.model_arch = "large_100h"
config.model_config = model_config
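A note on the pattern in this file: the new assert isinstance(...) lines presumably narrow an optimizer_config field whose declared type is broader than AdamWConfig, so that AdamW-specific attributes can be set on it without type errors. A minimal sketch of that narrowing idea, using illustrative dataclass names rather than fairseq2's actual config types:

from dataclasses import dataclass, field
from typing import Union


@dataclass
class AdamWConfig:
    lr: float = 5e-4
    weight_decay: float = 0.0


@dataclass
class SGDConfig:
    lr: float = 1e-2


@dataclass
class TrainConfig:
    # The optimizer field is a union, so a type checker (or a reader) cannot
    # assume the AdamW variant without an explicit check.
    optimizer_config: Union[AdamWConfig, SGDConfig] = field(default_factory=AdamWConfig)


def large_10h_preset() -> TrainConfig:
    config = TrainConfig()

    # Narrow the union before touching AdamW-specific attributes, mirroring
    # the asserts added in this commit.
    assert isinstance(config.optimizer_config, AdamWConfig)

    config.optimizer_config.weight_decay = 0.01
    return config


print(large_10h_preset())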
17 changes: 15 additions & 2 deletions src/fairseq2/recipes/wav2vec2/train.py
@@ -203,7 +203,6 @@ def _base_960h_fs2_mask() -> Wav2Vec2TrainConfig:
def _base_960h() -> Wav2Vec2TrainConfig:
config = Wav2Vec2TrainConfig()

assert isinstance(config.model_config, Wav2Vec2Config)
config.model_config.encoder_config.first_pass_dropout_p = 0.1
config.max_temporal_mask_prob = 0.65
config.mask_codebase = "fairseq1"
@@ -213,6 +212,9 @@ def _base_960h() -> Wav2Vec2TrainConfig:
@wav2vec2_train_preset("base_960h_perf")
def _base_960h_perf() -> Wav2Vec2TrainConfig:
config = _base_960h()

assert isinstance(config.lr_scheduler_config, PolynomialDecayLRConfig)

config.max_num_steps = 10000
config.lr_scheduler_config.num_warmup_steps = 800
return config
@@ -223,6 +225,9 @@ def _large_960h_fs2_mask() -> Wav2Vec2TrainConfig:
config = Wav2Vec2TrainConfig()
config.model_arch = "large"

assert isinstance(config.optimizer_config, AdamWConfig)
assert isinstance(config.lr_scheduler_config, PolynomialDecayLRConfig)

model_config = wav2vec2_archs.get("large", return_empty=True)
model_config.encoder_config.first_pass_dropout_p = 0.1
config.model_config = model_config
@@ -240,6 +245,9 @@ def _large_960h() -> Wav2Vec2TrainConfig:
config = Wav2Vec2TrainConfig()
config.model_arch = "large"

assert isinstance(config.optimizer_config, AdamWConfig)
assert isinstance(config.lr_scheduler_config, PolynomialDecayLRConfig)

model_config = wav2vec2_archs.get("large", return_empty=True)
model_config.encoder_config.first_pass_dropout_p = 0.1
model_config.max_temporal_mask_prob = 0.65
@@ -257,6 +265,9 @@ def _large_960h() -> Wav2Vec2TrainConfig:
@wav2vec2_train_preset("large_960h_perf")
def _large_960h_perf() -> Wav2Vec2TrainConfig:
config = _large_960h()

assert isinstance(config.lr_scheduler_config, PolynomialDecayLRConfig)

config.max_num_steps = 10000
config.lr_scheduler_config.num_warmup_steps = 800
return config
@@ -403,7 +414,9 @@ def load_wav2vec2_trainer(
) from ex

# Initialize the validation unit.
valid_unit = Wav2Vec2EvalUnit(dp_model, gang)
valid_unit = Wav2Vec2EvalUnit(
    dp_model, gang, config.diversity_loss_weight, config.penalty_weight
)

try:
valid_data_reader = dataset.create_reader(
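The last hunk passes the recipe's diversity-loss and feature-penalty weights through to the validation unit; presumably the old two-argument call no longer matched Wav2Vec2EvalUnit's constructor, which is what kept the recipe from running. A rough, self-contained sketch of that call-site shape, using a simplified stand-in class and illustrative weight values rather than the real fairseq2 implementation:

from dataclasses import dataclass
from typing import Any


@dataclass
class EvalUnitSketch:
    """Stand-in for a validation unit that weights the wav2vec 2.0 loss terms."""

    model: Any
    gang: Any
    diversity_loss_weight: float
    penalty_weight: float

    def loss(self, contrastive: float, diversity: float, penalty: float) -> float:
        # Validation mirrors the training objective only if it uses the same
        # weights the trainer was configured with.
        return (
            contrastive
            + self.diversity_loss_weight * diversity
            + self.penalty_weight * penalty
        )


# Post-fix call shape: both weights come from the training config. The values
# below are illustrative, not fairseq2 defaults. Constructing the unit without
# them (the pre-fix, two-argument call) raises a TypeError here because the
# fields have no defaults.
unit = EvalUnitSketch(model=None, gang=None, diversity_loss_weight=0.1, penalty_weight=10.0)
print(unit.loss(contrastive=2.5, diversity=0.3, penalty=0.01))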
