From 5636bf73e5500cbda9da8beaac8b73d625f82e6f Mon Sep 17 00:00:00 2001
From: Rahul Lashkari
Date: Tue, 15 Apr 2025 23:12:25 +0530
Subject: [PATCH] fix(docs): Correct typos in Gemma_1 notebooks

---
 Gemma/[Gemma_1]Advanced_Prompting_Techniques.ipynb      | 4 ++--
 Gemma/[Gemma_1]Basics_with_HF.ipynb                     | 8 ++++----
 Gemma/[Gemma_1]Common_use_cases.ipynb                   | 2 +-
 Gemma/[Gemma_1]Finetune_distributed.ipynb               | 2 +-
 Gemma/[Gemma_1]Minimal_RAG.ipynb                        | 6 +++---
 Gemma/[Gemma_1]RAG_with_ChromaDB.ipynb                  | 4 ++--
 Gemma/[Gemma_1]data_parallel_inference_in_jax_tpu.ipynb | 2 +-
 7 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/Gemma/[Gemma_1]Advanced_Prompting_Techniques.ipynb b/Gemma/[Gemma_1]Advanced_Prompting_Techniques.ipynb
index 8668754..1e87306 100644
--- a/Gemma/[Gemma_1]Advanced_Prompting_Techniques.ipynb
+++ b/Gemma/[Gemma_1]Advanced_Prompting_Techniques.ipynb
@@ -82,7 +82,7 @@
 "source": [
 "### Configure your credentials\n",
 "\n",
- "Add your your Kaggle credentials to the Colab Secrets manager to securely store it.\n",
+ "Add your Kaggle credentials to the Colab Secrets manager to securely store them.\n",
 "\n",
 "1. Open your Google Colab notebook and click on the 🔑 Secrets tab in the left panel. \"The\n",
 "2. Create new secrets: `KAGGLE_USERNAME` and `KAGGLE_KEY`\n",
@@ -350,7 +350,7 @@
 }
 ],
 "source": [
- "prompt = \"\"\"Genereate a single line of hashtags for the given topic by in the same style as the following examples:\n",
+ "prompt = \"\"\"Generate a single line of hashtags for the given topic in the same style as the following examples:\n",
 "\n",
 "Topic: Books\n",
 "#BooksLover #Books #MyBooks #BestBook #BookOfTheYear\n",
diff --git a/Gemma/[Gemma_1]Basics_with_HF.ipynb b/Gemma/[Gemma_1]Basics_with_HF.ipynb
index b42a953..6e3352c 100644
--- a/Gemma/[Gemma_1]Basics_with_HF.ipynb
+++ b/Gemma/[Gemma_1]Basics_with_HF.ipynb
@@ -881,8 +881,8 @@
 }
 ],
 "source": [
- "# Note: The token needs to have \"write\" permisssion\n",
- "# You can chceck it here:\n",
+ "# Note: The token needs to have \"write\" permission\n",
+ "# You can check it here:\n",
 "# https://huggingface.co/settings/tokens\n",
 "model.push_to_hub(\"my-gemma-2-finetuned-model\")"
 ]
@@ -921,9 +921,9 @@
 },
 "outputs": [],
 "source": [
- "!model=\"google/gemma-1.1-2b-it\" # ID of the model in Hugging Face hube\n",
+ "!model=\"google/gemma-1.1-2b-it\" # ID of the model in the Hugging Face Hub\n",
 "# (you can use your own fine-tuned model from\n",
- "# the prevous step)\n",
+ "# the previous step)\n",
 "!volume=$PWD/data # Shared directory with the Docker container\n",
 "# to avoid downloading weights every run\n",
 "\n",
diff --git a/Gemma/[Gemma_1]Common_use_cases.ipynb b/Gemma/[Gemma_1]Common_use_cases.ipynb
index 22e33dd..ed2537a 100644
--- a/Gemma/[Gemma_1]Common_use_cases.ipynb
+++ b/Gemma/[Gemma_1]Common_use_cases.ipynb
@@ -81,7 +81,7 @@
 "source": [
 "### Configure your credentials\n",
 "\n",
- "Add your your Kaggle credentials to the Colab Secrets manager to securely store it.\n",
+ "Add your Kaggle credentials to the Colab Secrets manager to securely store them.\n",
 "\n",
 "1. Open your Google Colab notebook and click on the 🔑 Secrets tab in the left panel. \"The\n",
 "2. Create new secrets: `KAGGLE_USERNAME` and `KAGGLE_KEY`\n",
diff --git a/Gemma/[Gemma_1]Finetune_distributed.ipynb b/Gemma/[Gemma_1]Finetune_distributed.ipynb
index 1783cca..8ebda9b 100644
--- a/Gemma/[Gemma_1]Finetune_distributed.ipynb
+++ b/Gemma/[Gemma_1]Finetune_distributed.ipynb
@@ -1260,7 +1260,7 @@
 "source": [
 "# What's next\n",
 "\n",
- "In this tutorial, you learned how to chat with the Gemma 7B model and fine-tune it to speak like a pirate, using Keras on JAX. You also learned how to load and train the large model in a distributed manner, on powerful TPUs, uising model parallelism.\n",
+ "In this tutorial, you learned how to chat with the Gemma 7B model and fine-tune it to speak like a pirate, using Keras on JAX. You also learned how to load and train the large model in a distributed manner, on powerful TPUs, using model parallelism.\n",
 "\n",
 "Here are a few suggestions for what else to learn, about Keras and JAX:\n",
 "* [Distributed training with Keras 3](https://keras.io/guides/distribution/).\n",
diff --git a/Gemma/[Gemma_1]Minimal_RAG.ipynb b/Gemma/[Gemma_1]Minimal_RAG.ipynb
index 54cc62a..f828de7 100644
--- a/Gemma/[Gemma_1]Minimal_RAG.ipynb
+++ b/Gemma/[Gemma_1]Minimal_RAG.ipynb
@@ -85,9 +85,9 @@
 "\n",
 "### Chunking the data\n",
 "\n",
- "To improve the relevance of content returned by the vector database during retrieval, break down large documents into smaller pieces or chunks while ingesting the document.\n",
+ "To improve the relevance of content returned by the vector database during retrieval, break down large documents into smaller pieces or chunks while ingesting the document.\n",
 "\n",
- "In this cookcook, you will use the [Google I/O 2024 Gemma family expansion launch blog](https://developers.googleblog.com/en/gemma-family-and-toolkit-expansion-io-2024/) as the sample document and Google's [Open Source HtmlChunker](https://github.com/google/labs-prototypes/tree/main/seeds/chunker-python) to chunk it up into passages."
+ "In this cookbook, you will use the [Google I/O 2024 Gemma family expansion launch blog](https://developers.googleblog.com/en/gemma-family-and-toolkit-expansion-io-2024/) as the sample document and Google's [Open Source HtmlChunker](https://github.com/google/labs-prototypes/tree/main/seeds/chunker-python) to chunk it up into passages."
 ]
 },
 {
@@ -828,7 +828,7 @@
 "id": "uXLpmtoeU0gx"
 },
 "source": [
- "Now load the Gemma model in quanzied 4-bit mode using Hugging Face."
+ "Now load the Gemma model in quantized 4-bit mode using Hugging Face."
 ]
 },
 {
diff --git a/Gemma/[Gemma_1]RAG_with_ChromaDB.ipynb b/Gemma/[Gemma_1]RAG_with_ChromaDB.ipynb
index 3bfee74..82eea9e 100644
--- a/Gemma/[Gemma_1]RAG_with_ChromaDB.ipynb
+++ b/Gemma/[Gemma_1]RAG_with_ChromaDB.ipynb
@@ -87,7 +87,7 @@
 "\n",
 "To improve the relevance of content returned by the vector database during retrieval, break down large documents into smaller pieces or chunks while ingesting the document.\n",
 "\n",
- "In this cookcook, you will use the [Google I/O 2024 Gemma family expansion launch blog](https://developers.googleblog.com/en/gemma-family-and-toolkit-expansion-io-2024/) as the sample document and Google's [Open Source HtmlChunker](https://github.com/google/labs-prototypes/tree/main/seeds/chunker-python) to chunk it up into passages."
+ "In this cookbook, you will use the [Google I/O 2024 Gemma family expansion launch blog](https://developers.googleblog.com/en/gemma-family-and-toolkit-expansion-io-2024/) as the sample document and Google's [Open Source HtmlChunker](https://github.com/google/labs-prototypes/tree/main/seeds/chunker-python) to chunk it up into passages." ] }, { @@ -400,7 +400,7 @@ "source": [ "### Generate the answer\n", "\n", - "Now load the Gemma model in quanzied 4-bit mode using Hugging Face." + "Now load the Gemma model in quantized 4-bit mode using Hugging Face." ] }, { diff --git a/Gemma/[Gemma_1]data_parallel_inference_in_jax_tpu.ipynb b/Gemma/[Gemma_1]data_parallel_inference_in_jax_tpu.ipynb index 8bf04f9..205780d 100644 --- a/Gemma/[Gemma_1]data_parallel_inference_in_jax_tpu.ipynb +++ b/Gemma/[Gemma_1]data_parallel_inference_in_jax_tpu.ipynb @@ -217,7 +217,7 @@ "## Load the Model\n", "You will use the latest [Gemma-2B](https://huggingface.co/google/gemma-1.1-2b-it), this model offers 2 billion parameters, ensuring a lightweight footprint.\n", "\n", - "The Gemma model can be loaded using the familiar [`from_pretrained`](https://huggingface.co/docs/transformers/v4.38.1/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained) method in Transformers. This method downloads the model weights from the Hugging Face Hub the first time it is called, and subsequently intialises the Gemma model using these weights.\n" + "The Gemma model can be loaded using the familiar [`from_pretrained`](https://huggingface.co/docs/transformers/v4.38.1/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained) method in Transformers. This method downloads the model weights from the Hugging Face Hub the first time it is called, and subsequently initializes the Gemma model using these weights.\n" ] }, {