|
354 | 354 | "\n",
|
355 | 355 | "batch_size = 200\n",
|
356 | 356 | "steps_per_epoch = 60000 // batch_size\n",
|
| 357 | + "validation_steps = 10000 // batch_size\n", |
357 | 358 | "\n",
|
358 | 359 | "train_dataset = get_dataset(batch_size, is_training=True)\n",
|
359 | 360 | "test_dataset = get_dataset(batch_size, is_training=False)\n",
|
360 | 361 | "\n",
|
361 | 362 | "model.fit(train_dataset,\n",
|
362 | 363 | " epochs=5,\n",
|
363 | 364 | " steps_per_epoch=steps_per_epoch,\n",
|
364 |
| - " validation_data=test_dataset)" |
| 365 | + " validation_data=test_dataset, \n", |
| 366 | + " validation_steps=validation_steps)" |
365 | 367 | ]
|
366 | 368 | },
|
367 | 369 | {
|
|
371 | 373 | "id": "8hSGBIYtUugJ"
|
372 | 374 | },
|
373 | 375 | "source": [
|
374 |
| - "To reduce python overhead, and maximize the performance of your TPU, try out the **experimental** `experimental_steps_per_execution` argument to `Model.compile`. Here it approximately **doubles** the throughput:" |
| 376 | + "To reduce Python overhead and maximize the performance of your TPU, try out the **experimental** `experimental_steps_per_execution` argument to `Model.compile`. Here it increases throughput by about 50%:" |
375 | 377 | ]
|
376 | 378 | },
|
377 | 379 | {
|
|
395 | 397 | "model.fit(train_dataset,\n",
|
396 | 398 | " epochs=5,\n",
|
397 | 399 | " steps_per_epoch=steps_per_epoch,\n",
|
398 |
| - " validation_data=test_dataset)" |
| 400 | + " validation_data=test_dataset,\n", |
| 401 | + " validation_steps=validation_steps)" |
399 | 402 | ]
|
400 | 403 | },
|
401 | 404 | {
|
|
0 commit comments