Fix Muon optimizer to use variable.name instead of deprecated variable.path #21854

Changes from 1 commit (base: master)
```diff
@@ -24,7 +24,7 @@ class Muon(optimizer.Optimizer):
         will be used. This is not configurable.
     - If the argument `exclude_embeddings` (defaults to `True`) is set
         to `True`, the AdamW step will be used.
-    - For any variablewith a name that matches an expression
+    - For any variable with a name that matches an expression
         listed in the argument `exclude_layers` (a list), the
         AdamW step will be used.
     - Any other variable uses the Muon step.
```
```diff
@@ -46,17 +46,17 @@ class Muon(optimizer.Optimizer):
         that takes no arguments and returns the actual value to use.
         The exponential decay rate for the 1st moment estimates. Defaults to
         `0.9`.
-    adam_beta_2: A float value or a constant float tensor, ora callable
+    adam_beta_2: A float value or a constant float tensor, or a callable
         that takes no arguments and returns the actual value to use.
         The exponential decay rate for the 2nd moment estimates. Defaults to
         `0.999`.
     epsilon: A small constant for numerical stability. This is
         "epsilon hat" in the Kingma and Ba paper
         (in the formula just before Section 2.1),
         not the epsilon in Algorithm 1 of the paper.
-        It be used at Adamw.Defaults to `1e-7`.
+        It be used at Adamw. Defaults to `1e-7`.
     exclude_layers: List of strings, keywords of layer names to exclude.
-        All layers with keywords in their path will use adamw.
+        All layers with keywords in their name will use adamw.
     exclude_embeddings: Boolean value
         If True, embedding layers will use adamw.
     muon_a: Float, parameter a of the muon algorithm.
```
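To see how the exclusion arguments documented above fit together, here is a minimal usage sketch, assuming the optimizer is exported as `keras.optimizers.Muon` (the keyword `"lm_head"` is a hypothetical layer name, not from this PR):

```python
import keras

# Hypothetical configuration: embedding tables and any variable whose
# name matches the regex keyword "lm_head" take the AdamW step; other
# eligible 2-D/3-D variables take the Muon step.
optimizer = keras.optimizers.Muon(
    learning_rate=1e-3,
    exclude_layers=["lm_head"],
    exclude_embeddings=True,
)
```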
```diff
@@ -134,10 +134,10 @@ def _should_use_adamw(self, variable):
         # any {0,1}-D parameters should all be optimized by adam
         if not 1 < len(variable.shape) < 4:
             return True
-        if self.exclude_embeddings and "embedding" in variable.path.lower():
+        if self.exclude_embeddings and "embedding" in variable.name.lower():
             return True
         for keyword in self.exclude_layers:
-            if re.search(keyword, variable.path):
+            if re.search(keyword, variable.name):
                 return True
         return False
```
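As a side note on the dimensionality check in this hunk: only 2-D and 3-D variables take the Muon step, and everything else is routed to AdamW. A minimal standalone sketch of just that predicate (plain Python, independent of the Keras classes):

```python
def should_use_adamw(shape) -> bool:
    # Mirrors `not 1 < len(shape) < 4`: only 2-D and 3-D tensors
    # take the Muon step; everything else falls back to AdamW.
    return not 1 < len(shape) < 4

print(should_use_adamw((128,)))          # True  -> bias vectors use AdamW
print(should_use_adamw((128, 256)))      # False -> dense kernels use Muon
print(should_use_adamw((3, 3, 64, 64)))  # True  -> 4-D conv kernels use AdamW
```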
```diff
@@ -161,13 +161,13 @@ def build(self, var_list):
         for var in var_list:
             if not self._overwrite_variable_with_gradient(var):
-                self.adam_momentums[var.path] = (
+                self.adam_momentums[var.name] = (
                     self.add_variable_from_reference(
                         reference_variable=var, name="momentum"
                     )
                 )
                 if self._should_use_adamw(var):
-                    self.adam_velocities[var.path] = (
+                    self.adam_velocities[var.name] = (
                         self.add_variable_from_reference(
                             reference_variable=var, name="velocity"
                         )
                     )
```
```diff
@@ -183,7 +183,14 @@ def update_step(self, gradient, variable, learning_rate):
             self._muon_update_step(gradient, variable, learning_rate)
 
     def _muon_update_step(self, gradient, variable, lr):
-        m = self.adam_momentums[variable.path]
+        if variable.name not in self.adam_momentums:
+            self.adam_momentums[variable.name] = (
+                self.add_variable_from_reference(
+                    reference_variable=variable, name="momentum"
+                )
+            )
+
+        m = self.adam_momentums[variable.name]
         self.assign_add(m, ops.add(gradient, m * (self.momentum - 1)))
         shape = variable.shape
         if self.nesterov:
```

Comment on lines +186 to +191 (Contributor):

To improve maintainability and reduce code duplication, you could extract this logic for lazily initializing the momentum variable into a helper method. This same logic is repeated in `_adamw_update_step`. You could define a new private method like this:

```python
def _maybe_init_momentum(self, variable):
    if variable.name not in self.adam_momentums:
        self.adam_momentums[variable.name] = (
            self.add_variable_from_reference(
                reference_variable=variable, name="momentum"
            )
        )
```

Then you can replace this block with a single call:

```python
self._maybe_init_momentum(variable)
```
```diff
@@ -200,6 +207,19 @@ def _muon_update_step(self, gradient, variable, lr):
 
     def _adamw_update_step(self, gradient, variable, learning_rate):
         """Update step given gradient and the associated model variable."""
+        if variable.name not in self.adam_momentums:
+            self.adam_momentums[variable.name] = (
+                self.add_variable_from_reference(
+                    reference_variable=variable, name="momentum"
+                )
+            )
+
+        if variable.name not in self.adam_velocities:
+            self.adam_velocities[variable.name] = (
+                self.add_variable_from_reference(
+                    reference_variable=variable, name="velocity"
+                )
+            )
+
         lr = ops.cast(learning_rate, variable.dtype)
         gradient = ops.cast(gradient, variable.dtype)
         local_step = ops.cast(self.iterations + 1, variable.dtype)
```
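The two lazy-initialization blocks in this hunk mirror the one in `_muon_update_step`, so the reviewer's helper-method suggestion above would apply here as well. A hedged sketch of one possible generalization (the helper name `_maybe_init_slot` is hypothetical, not part of this PR or the review):

```python
def _maybe_init_slot(self, slots, variable, name):
    # Lazily create an optimizer slot variable (e.g. "momentum" or
    # "velocity"), keyed the same way as the rest of this PR.
    if variable.name not in slots:
        slots[variable.name] = self.add_variable_from_reference(
            reference_variable=variable, name=name
        )

# Usage inside _adamw_update_step:
#     self._maybe_init_slot(self.adam_momentums, variable, "momentum")
#     self._maybe_init_slot(self.adam_velocities, variable, "velocity")
```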
```diff
@@ -210,8 +230,8 @@ def _adamw_update_step(self, gradient, variable, learning_rate):
             ops.cast(self.adam_beta_2, variable.dtype), local_step
         )
 
-        m = self.adam_momentums[variable.path]
-        v = self.adam_velocities[variable.path]
+        m = self.adam_momentums[variable.name]
+        v = self.adam_velocities[variable.name]
 
         alpha = lr * ops.sqrt(1 - adam_beta_2_power) / (1 - adam_beta_1_power)
```
These code paths are intended for Keras variables, for which we mean to use the `path` attribute, which is different from the `name` attribute.

Please see the detailed comment here: #21797 (review), for how to fix this properly.
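For context on the reviewers' objection: in Keras 3, `Variable.name` is only the local name, while `Variable.path` is prefixed with the owning layer's path, so two layers of the same type can share a `name` but never a `path`, and keying the slot dictionaries by `name` can collide across layers. A minimal sketch illustrating the difference (the exact auto-generated prefixes may vary):

```python
import keras

# Two Dense layers: their kernels share the local name "kernel"
# but get distinct paths.
model = keras.Sequential([
    keras.Input(shape=(8,)),
    keras.layers.Dense(4),
    keras.layers.Dense(2),
])

for v in model.trainable_variables:
    print(v.name, "|", v.path)

# Expected shape of the output (prefixes may differ):
#   kernel | sequential/dense/kernel
#   bias   | sequential/dense/bias
#   kernel | sequential/dense_1/kernel   <- same name, different path
#   bias   | sequential/dense_1/bias
```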