@@ -673,12 +673,13 @@ def compile(
                 coefficients.
             weighted_metrics: List of metrics to be evaluated and weighted by
                 `sample_weight` or `class_weight` during training and testing.
-            run_eagerly: Bool. Defaults to `False`. If `True`, this `Model`'s
-                logic will not be wrapped in a `tf.function`. Recommended to leave
-                this as `None` unless your `Model` cannot be run inside a
-                `tf.function`. `run_eagerly=True` is not supported when using
-                `tf.distribute.experimental.ParameterServerStrategy`.
-            steps_per_execution: Int. Defaults to 1. The number of batches to
+            run_eagerly: Bool. If `True`, this `Model`'s logic will not be
+                wrapped in a `tf.function`. Recommended to leave this as `None`
+                unless your `Model` cannot be run inside a `tf.function`.
+                `run_eagerly=True` is not supported when using
+                `tf.distribute.experimental.ParameterServerStrategy`. Defaults to
+                `False`.
+            steps_per_execution: Int. The number of batches to
                 run during each `tf.function` call. Running multiple batches
                 inside a single `tf.function` call can greatly improve performance
                 on TPUs or small models with a large Python overhead. At most, one
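For context, a minimal sketch of how the two arguments edited above are passed to `compile()`; the model, optimizer, and the value 32 are illustrative assumptions, not part of this commit:

```python
import tensorflow as tf

# Minimal sketch of run_eagerly and steps_per_execution; the model and
# values here are arbitrary assumptions chosen only for illustration.
model = tf.keras.Sequential([tf.keras.layers.Dense(10, activation="softmax")])
model.compile(
    optimizer="adam",
    loss="sparse_categorical_crossentropy",
    run_eagerly=False,       # keep the train step wrapped in a tf.function
    steps_per_execution=32,  # run 32 batches per tf.function call
)
```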
@@ -687,7 +688,7 @@ def compile(
                 the size of the epoch. Note that if `steps_per_execution` is set
                 to `N`, `Callback.on_batch_begin` and `Callback.on_batch_end`
                 methods will only be called every `N` batches (i.e. before/after
-                each `tf.function` execution).
+                each `tf.function` execution). Defaults to `1`.
             jit_compile: If `True`, compile the model training step with XLA.
                 [XLA](https://www.tensorflow.org/xla) is an optimizing compiler
                 for machine learning.
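The callback behaviour described in this hunk can be observed with a small experiment; everything below (model, data, `steps_per_execution=4`) is an assumption chosen only to make the effect visible:

```python
import numpy as np
import tensorflow as tf

# Hedged sketch: with steps_per_execution=N, batch-level callback hooks
# fire once per tf.function execution, i.e. every N batches.
class BatchLogger(tf.keras.callbacks.Callback):
    def on_train_batch_end(self, batch, logs=None):
        print("on_train_batch_end at batch", batch)

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.compile(optimizer="sgd", loss="mse", steps_per_execution=4)
x = np.random.rand(64, 8).astype("float32")
y = np.random.rand(64, 1).astype("float32")
model.fit(x, y, batch_size=8, epochs=1, verbose=0, callbacks=[BatchLogger()])
# With 8 batches per epoch and steps_per_execution=4, expect roughly two
# hook invocations instead of eight.
```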
@@ -708,9 +709,10 @@ def compile(
                 not process the same data. The number of shards should be at least
                 the number of workers for good performance. A value of 'auto'
                 turns on exact evaluation and uses a heuristic for the number of
-                shards based on the number of workers. Defaults to 0, meaning no
+                shards based on the number of workers. 0, meaning no
                 visitation guarantee is provided. NOTE: Custom implementations of
                 `Model.test_step` will be ignored when doing exact evaluation.
+                Defaults to `0`.
             **kwargs: Arguments supported for backwards compatibility only.
         """
         if jit_compile and not tf_utils.can_jit_compile(warn=True):
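A hedged sketch of enabling `jit_compile` as documented above; the guard on the last context line exists because XLA is not available on every platform. `pss_evaluation_shards` is not demonstrated since it only applies under `ParameterServerStrategy`:

```python
import tensorflow as tf

# Sketch only: turn on XLA compilation of the train step. Treat this as
# best-effort; the model and optimizer below are illustrative assumptions.
model = tf.keras.Sequential([tf.keras.layers.Dense(4)])
model.compile(optimizer="adam", loss="mse", jit_compile=True)
```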
@@ -1457,11 +1459,11 @@ def fit(
                 of index `epochs` is reached.
             verbose: 'auto', 0, 1, or 2. Verbosity mode.
                 0 = silent, 1 = progress bar, 2 = one line per epoch.
-                'auto' defaults to 1 for most cases, but 2 when used with
+                'auto' becomes 1 for most cases, but 2 when used with
                 `ParameterServerStrategy`. Note that the progress bar is not
                 particularly useful when logged to a file, so verbose=2 is
                 recommended when not running interactively (eg, in a production
-                environment).
+                environment). Defaults to 'auto'.
             callbacks: List of `keras.callbacks.Callback` instances.
                 List of callbacks to apply during training.
                 See `tf.keras.callbacks`. Note
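A minimal illustration of the `verbose` modes for `fit()` described above; the random data exists only to make the calls runnable:

```python
import numpy as np
import tensorflow as tf

# Sketch of the verbose modes; model and data are arbitrary assumptions.
model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.compile(optimizer="sgd", loss="mse")
x = np.random.rand(32, 4).astype("float32")
y = np.random.rand(32, 1).astype("float32")

model.fit(x, y, epochs=2, verbose="auto")  # progress bar in an interactive run
model.fit(x, y, epochs=2, verbose=2)       # one line per epoch; log-file friendly
```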
@@ -2059,11 +2061,11 @@ def evaluate(
                 they generate batches).
             verbose: `"auto"`, 0, 1, or 2. Verbosity mode.
                 0 = silent, 1 = progress bar, 2 = single line.
-                `"auto"` defaults to 1 for most cases, and to 2 when used with
+                `"auto"` becomes 1 for most cases, and to 2 when used with
                 `ParameterServerStrategy`. Note that the progress bar is not
                 particularly useful when logged to a file, so `verbose=2` is
                 recommended when not running interactively (e.g. in a production
-                environment).
+                environment). Defaults to 'auto'.
             sample_weight: Optional Numpy array of weights for the test samples,
                 used for weighting the loss function. You can either pass a flat
                 (1D) Numpy array with the same length as the input samples
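A short sketch of `evaluate()` combining epoch-style logging with a flat per-sample weight array, per the docstring above; shapes and data are assumptions:

```python
import numpy as np
import tensorflow as tf

# Sketch of evaluate() with verbose=2 and per-sample weights.
model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.compile(optimizer="sgd", loss="mse")
x = np.random.rand(32, 4).astype("float32")
y = np.random.rand(32, 1).astype("float32")
weights = np.ones(32)  # flat (1D) array, one weight per test sample
loss = model.evaluate(x, y, sample_weight=weights, verbose=2)
```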
@@ -2419,11 +2421,11 @@ def predict(
                 (since they generate batches).
             verbose: `"auto"`, 0, 1, or 2. Verbosity mode.
                 0 = silent, 1 = progress bar, 2 = single line.
-                `"auto"` defaults to 1 for most cases, and to 2 when used with
+                `"auto"` becomes 1 for most cases, and to 2 when used with
                 `ParameterServerStrategy`. Note that the progress bar is not
                 particularly useful when logged to a file, so `verbose=2` is
                 recommended when not running interactively (e.g. in a production
-                environment).
+                environment). Defaults to 'auto'.
             steps: Total number of steps (batches of samples)
                 before declaring the prediction round finished.
                 Ignored with the default value of `None`. If x is a `tf.data`
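A sketch of `predict()` with the `verbose` and `steps` arguments discussed above; dataset contents and shapes are assumptions:

```python
import numpy as np
import tensorflow as tf

# Sketch of predict() on a tf.data dataset, with and without `steps`.
model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
data = tf.data.Dataset.from_tensor_slices(
    np.random.rand(32, 4).astype("float32")
).batch(8)
preds = model.predict(data, verbose=2)                   # run until exhausted
preds_partial = model.predict(data, steps=2, verbose=2)  # stop after 2 batches
print(preds.shape, preds_partial.shape)                  # (32, 1) and (16, 1)
```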
@@ -2958,7 +2960,7 @@ def save(self, filepath, overwrite=True, save_format=None, **kwargs):
         SavedModel format arguments:
             include_optimizer: Only applied to SavedModel and legacy HDF5
                 formats. If False, do not save the optimizer state.
-                Defaults to True.
+                Defaults to `True`.
             signatures: Only applies to SavedModel format. Signatures to save
                 with the SavedModel. See the `signatures` argument in
                 `tf.saved_model.save` for details.
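A hedged example of the SavedModel-format arguments documented above, assuming the TF 2.x saving path; the file path is illustrative only:

```python
import tensorflow as tf

# Sketch: save to the SavedModel format while skipping optimizer state.
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer="adam", loss="mse")
model.save("/tmp/example_savedmodel", include_optimizer=False)
```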
@@ -3051,7 +3053,7 @@ def save_weights(
                 target location, or provide the user with a manual prompt.
             save_format: Either 'tf' or 'h5'. A `filepath` ending in '.h5' or
                 '.keras' will default to HDF5 if `save_format` is `None`.
-                Otherwise `None` defaults to 'tf'.
+                Otherwise, `None` becomes 'tf'. Defaults to `None`.
             options: Optional `tf.train.CheckpointOptions` object that specifies
                 options for saving weights.
 
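A sketch of how `save_format` is resolved according to the docstring above; the file names are examples only:

```python
import tensorflow as tf

# Sketch of save_format resolution: suffix-driven HDF5, TF checkpoint
# when no suffix is given, and an explicit override.
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.save_weights("/tmp/example_weights.h5")    # '.h5' suffix -> HDF5
model.save_weights("/tmp/example_weights_tf")    # no suffix, None -> TF checkpoint
model.save_weights("/tmp/example_weights_hdf5", save_format="h5")  # explicit
```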
@@ -3366,17 +3368,17 @@ def summary(
                 (e.g. set this to adapt the display to different
                 terminal window sizes).
             positions: Relative or absolute positions of log elements
-                in each line. If not provided,
-                defaults to `[0.3, 0.6, 0.70, 1.]`
+                in each line. If not provided, becomes
+                `[0.3, 0.6, 0.70, 1.]`. Defaults to `None`.
             print_fn: Print function to use. By default, prints to `stdout`.
                 If `stdout` doesn't work in your environment, change to `print`.
                 It will be called on each line of the summary.
                 You can set it to a custom function
                 in order to capture the string summary.
             expand_nested: Whether to expand the nested models.
-                If not provided, defaults to `False`.
+                Defaults to `False`.
             show_trainable: Whether to show if a layer is trainable.
-                If not provided, defaults to `False`.
+                Defaults to `False`.
             layer_range: a list or tuple of 2 strings,
                 which is the starting layer name and ending layer name
                 (both inclusive) indicating the range of layers to be printed
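A sketch of capturing the summary through `print_fn` together with the boolean flags documented above; the defensive `*args, **kwargs` signature is an assumption to cope with Keras versions that pass extra keywords to `print_fn`:

```python
import tensorflow as tf

# Sketch: collect the summary lines instead of printing them to stdout.
model = tf.keras.Sequential(
    [tf.keras.layers.Dense(8, input_shape=(4,)), tf.keras.layers.Dense(1)]
)
lines = []

def capture(line, *args, **kwargs):
    lines.append(line)

model.summary(print_fn=capture, expand_nested=False, show_trainable=True)
summary_text = "\n".join(lines)
```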
@@ -3942,7 +3944,8 @@ def _get_compile_args(self, user_metrics=True):
 
         Args:
             user_metrics: Whether to return user-supplied metrics or `Metric`
-                objects. Defaults to returning the user-supplied metrics.
+                objects. If True, returns the user-supplied metrics.
+                Defaults to `True`.
 
         Returns:
             Dictionary of arguments that were used when compiling the model.
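Illustration only: `_get_compile_args` is a private helper, so the call below is an assumption about internal behaviour rather than supported API:

```python
import tensorflow as tf

# Sketch: inspect the compile arguments as the user supplied them.
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(2,))])
model.compile(optimizer="adam", loss="mse", metrics=["mae"])
compile_args = model._get_compile_args(user_metrics=True)
print(compile_args["metrics"])  # the user-supplied metric specs
```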
@@ -4186,11 +4189,11 @@ def _get_verbosity(verbose, distribute_strategy):
             distribute_strategy._should_use_with_coordinator
             or not io_utils.is_interactive_logging_enabled()
         ):
-            # Default to epoch-level logging for PSStrategy or using absl
+            # Defaults to epoch-level logging for PSStrategy or using absl
             # logging.
             return 2
         else:
-            return 1  # Default to batch-level logging otherwise.
+            return 1  # Defaults to batch-level logging otherwise.
     return verbose
 
 
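For readability, a standalone re-statement of the resolution logic in this hunk; it is not the real helper (which is private and also consults `io_utils.is_interactive_logging_enabled()`):

```python
import tensorflow as tf

# Illustrative restatement: how verbose='auto' is resolved to 1 or 2.
def resolve_verbosity(verbose, strategy):
    if verbose == "auto":
        if getattr(strategy, "_should_use_with_coordinator", False):
            return 2  # epoch-level logging under ParameterServerStrategy
        return 1      # batch-level progress bar otherwise
    return verbose

print(resolve_verbosity("auto", tf.distribute.get_strategy()))  # -> 1
```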