diff --git a/The_Fashion_Challenge.ipynb b/The_Fashion_Challenge.ipynb new file mode 100644 index 0000000..e5b19e3 --- /dev/null +++ b/The_Fashion_Challenge.ipynb @@ -0,0 +1,457 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "provenance": [], + "include_colab_link": true + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + } + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "view-in-github", + "colab_type": "text" + }, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "markdown", + "source": [ + "# The Fashion Challenge!\n", + "\n", + "You've successfully trained a neural network to recognize handwritten digits. That's a great first step!\n", + "\n", + "Now, it's time to test your skills on a new, slightly harder challenge: classifying items of clothing.\n", + "\n", + "We'll use the Fashion MNIST dataset. It's just like the digit dataset:\n", + "\n", + "- 60,000 training images, 10,000 test images.\n", + "\n", + "- Each image is 28x28 grayscale.\n", + "\n", + "- There are 10 classes, but instead of \"0-9\", they are:\n", + "\n", + " - 0: T-shirt/top\n", + "\n", + " - 1: Trouser\n", + "\n", + " - 2: Pullover\n", + "\n", + " - 3: Dress\n", + "\n", + " - 4: Coat\n", + "\n", + " - 5: Sandal\n", + "\n", + " - 6: Shirt\n", + "\n", + " - 7: Sneaker\n", + "\n", + " - 8: Bag\n", + "\n", + " - 9: Ankle boot\n", + "\n", + "Your goal is to build, train, and test a model that can look at an image of a sandal and know it's not a sneaker!\n", + "\n", + "Let's begin! Follow the steps below and fill in the code in the ... sections." + ], + "metadata": { + "id": "3FK_QIk0NGne" + } + }, + { + "cell_type": "markdown", + "source": [ + "# Section 1: Setup & Imports\n", + "\n", + "Challenge 1: Import all the libraries you'll need. 
(Hint: You'll need datasets, matplotlib.pyplot, numpy, and tensorflow.keras)" + ], + "metadata": { + "id": "0zPevg12ObWI" + } + }, + { + "cell_type": "code", + "source": [ + "# TODO: Import your libraries here\n", + "import datasets\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "from tensorflow import keras as tfk" + ], + "metadata": { + "id": "EIdrEt3oOjog" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "# Section 2: Load & Explore the Data\n", + "\n", + "Challenge 2: Load the `fashion_mnist` dataset (it's in the datasets library, just like `p2pfl/MNIST`). After loading, print the dataset to see its structure." + ], + "metadata": { + "id": "ze_S9ehZO3Z4" + } + }, + { + "cell_type": "code", + "source": [ + "# TODO: Load the 'fashion_mnist' dataset\n", + "dataset = ...\n", + "\n", + "# TODO: Print the dataset\n", + "print(...)" + ], + "metadata": { + "id": "WWb_j_CvP0JU" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "# Let's see the data\n", + "\n", + "Challenge 3: Let's visualize the data.\n", + "\n", + " - I've given you a `class_names` list to make the labels readable.\n", + "\n", + " - Get a random image from the train set.\n", + "\n", + " - Use plt.imshow() to show the image.\n", + "\n", + " - Set the plt.title() to show its name (e.g., \"Sneaker\") using the `class_names` list." 
+ ], + "metadata": { + "id": "-7W0-weeQhh_" + } + }, + { + "cell_type": "code", + "source": [ + "# We provide this list for you to make labels easy to read!\n", + "class_names = [\n", + " \"T-shirt/top\", \"Trouser\", \"Pullover\", \"Dress\", \"Coat\",\n", + " \"Sandal\", \"Shirt\", \"Sneaker\", \"Bag\", \"Ankle boot\"\n", + "]\n", + "\n", + "# TODO: Get a random example from dataset['train']\n", + "random_example = ...\n", + "\n", + "# TODO: Get the image and label from the random example\n", + "img = ...\n", + "label_index = ...\n", + "\n", + "# TODO: Get the \"name\" of the label from the class_names list\n", + "label_name = class_names[label_index]\n", + "\n", + "# TODO: Show the image\n", + "plt.imshow(...)\n", + "plt.title(f\"Label: {label_name}\")\n", + "plt.axis(\"off\")\n", + "plt.show()" + ], + "metadata": { + "id": "fl3y0zXwRXB2" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "# Section 3: Prepare the Data\n", + "\n", + "## This step is identical to the MNIST notebook. We need to:\n", + "\n", + " - Normalize: Scale pixels from 0-255 to 0.0-1.0.\n", + "\n", + " - Flatten: Turn the 28x28 image into a 1x784 vector.\n", + "\n", + "## Challenge 4: Preprocess the entire dataset.\n", + "\n", + " - Define a preprocess function.\n", + "\n", + " - Use dataset.map() to apply it.\n", + "\n", + " - Create the final `X_train`, `y_train`, `X_test`, `y_test` numpy arrays from the processed_dataset." + ], + "metadata": { + "id": "z3ByO2yuRseZ" + } + }, + { + "cell_type": "code", + "source": [ + "# TODO: Define your 'preprocess' function\n", + "def preprocess(examples):\n", + " # 1. Convert PIL images to numpy arrays\n", + " images = ...\n", + " # 2. Convert to numpy stack\n", + " images_np = ...\n", + " # 3. Normalize (divide by 255.0)\n", + " images_np = ...\n", + " # 4. 
Flatten (reshape to -1, 784)\n", + " images_flat = ...\n", + "\n", + " examples['image_flat'] = images_flat\n", + " return examples\n", + "\n", + "# TODO: Apply the function to the dataset\n", + "processed_dataset = ...\n", + "\n", + "# TODO: Create your final numpy arrays\n", + "X_train = np.array(...)\n", + "y_train = np.array(...)\n", + "\n", + "X_test = np.array(...)\n", + "y_test = np.array(...)\n", + "\n", + "# TODO: Print the shape of X_train to confirm it's (60000, 784)\n", + "print(f\"X_train shape: {X_train.shape}\")" + ], + "metadata": { + "id": "vVBJtl2GSbeN" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "# Section 4: Build the Model\n", + "\n", + "## Challenge 5: Build your neural network!\n", + "\n", + " Create a keras.Sequential model.\n", + "\n", + " Add an input layer (layers.Dense). It should have 128 units, activation='relu', and the correct input_shape for our 784-pixel-long images.\n", + "\n", + " Add one hidden layer (layers.Dense). Let's give it 64 units and activation='relu'.\n", + "\n", + " Add an output layer (layers.Dense).\n", + "\n", + " - How many units does it need? (Hint: How many classes are there?)\n", + "\n", + " - What activation function should you use for multi-class classification?(Hint: \"softmax\")\n", + "\n", + " compile() the model.\n", + "\n", + " - Use optimizer='adam'.\n", + "\n", + " - Use the correct loss function for \"sparse\" integer labels (like 0, 1, 2...).\n", + "\n", + " - Add metrics=['accuracy'].\n", + "\n", + " Finally, print the model.summary()." + ], + "metadata": { + "id": "QUwSKivvSzAv" + } + }, + { + "cell_type": "code", + "source": [ + "# TODO: 1. Create a Sequential model\n", + "model = ...\n", + "\n", + "# TODO: 2. Add the Input layer (128 units)\n", + "# ...\n", + "\n", + "# TODO: 3. Add the Hidden layer (64 units)\n", + "# ...\n", + "\n", + "# TODO: 4. Add the Output layer (10 units, 'softmax')\n", + "# ...\n", + "\n", + "# TODO: 5. 
Compile the model\n", + "model.compile(\n", + " optimizer=...,\n", + " loss=...,\n", + " metrics=...\n", + ")\n", + "\n", + "# TODO: 6. Print the summary\n", + "model.summary()" + ], + "metadata": { + "id": "WOwY7-wNVad8" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "# Section 5: Train the Model\n", + "\n", + "Challenge 6: It's time to learn!\n", + "\n", + " - Call model.fit() on your X_train and y_train data.\n", + "\n", + " - Train for 10 epochs.\n", + "\n", + " - Don't forget to add your validation_data so you can see how it performs on the test set!\n", + "\n", + " - Store the training output in a variable called history." + ], + "metadata": { + "id": "8UbU0nW4VnWd" + } + }, + { + "cell_type": "code", + "source": [ + "print(\"Starting training...\")\n", + "\n", + "# TODO: Train the model using model.fit()\n", + "history = model.fit(\n", + " ..., # Training images\n", + " ..., # Training labels\n", + " epochs=...,\n", + " validation_data=(..., ...) # Test images and labels\n", + ")\n", + "\n", + "print(\"Training finished!\")" + ], + "metadata": { + "id": "A8q14ZfAV9y7" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "# Section 6: Evaluate & Predict\n", + "\n", + "Challenge 7: Plot the accuracy.\n", + "\n", + " - Use the history.history object to plot the accuracy and val_accuracy over time." 
+ ], + "metadata": { + "id": "1RWCb6BqWdZ8" + } + }, + { + "cell_type": "code", + "source": [ + "# TODO: Plot the 'accuracy' and 'val_accuracy' from the history object\n", + "\n" + ], + "metadata": { + "id": "iHYyzw-_W1EO" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "# Let's make a final prediction!\n", + "\n", + "## Challenge 8: Test your model on a random image.\n", + "\n", + "- Get a random index.\n", + "\n", + " - Get the original image (from dataset['test']) and the processed image (from X_test).\n", + "\n", + " - Get the correct label name (e.g., \"Bag\") using class_names.\n", + "\n", + " - Prepare the processed image for the model (use np.expand_dims).\n", + "\n", + " - Get the model's prediction_probabilities.\n", + "\n", + " - Find the predicted_class index using np.argmax().\n", + "\n", + " - Get the predicted label name (e.g., \"Sneaker\").\n", + "\n", + " - Plot the original image and set the title to show both the prediction and the correct answer!" + ], + "metadata": { + "id": "5lcWje7-W8s5" + } + }, + { + "cell_type": "code", + "source": [ + "# TODO: 1. Get a random index\n", + "random_index = np.random.randint(0, len(dataset['test']))\n", + "\n", + "# --- Get the correct answer ---\n", + "\n", + "\n", + "# TODO: 2. Get the original image\n", + "img = dataset['test'][random_index]['image']\n", + "\n", + "# TODO: 3. Get the correct label index and name\n", + "correct_label_index = dataset['test'][random_index]['label']\n", + "correct_label_name = class_names[correct_label_index]\n", + "\n", + "# --- Make a prediction ---\n", + "\n", + "\n", + "# TODO: 2. Get the processed image (from X_test)\n", + "image_to_predict = ...[random_index]\n", + "\n", + "# TODO: 4. Prepare the image for the model\n", + "image_batch = np.expand_dims(...)\n", + "\n", + "# TODO: 5. Get the prediction probabilities\n", + "prediction_probabilities = model.predict(...)\n", + "\n", + "# TODO: 6. 
Get the predicted class index\n", + "predicted_class_index = np.argmax(...)\n", + "\n", + "# TODO: 7. Get the predicted class name\n", + "predicted_class_name = class_names[...]\n", + "\n", + "# --- Show the result ---\n", + "\n", + "\n", + "# TODO: 8. Plot the image and set the title\n", + "plt.imshow(img, cmap=\"gray\")\n", + "plt.title(f\"Correct: {correct_label_name}\\nPrediction: {predicted_class_name}\")\n", + "plt.axis(\"off\")\n", + "plt.show()" + ], + "metadata": { + "id": "SA5A9ujbXXkM" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "# Bonus Challenges\n", + "\n", + "If you finish early, try these!\n", + "\n", + " - Tune for Accuracy: Can you get over 90% validation accuracy? Try adding another hidden layer, or changing the number of `units` (e.g., 256, 128). Try training for more `epochs` (like 20).\n", + "\n", + " - Why Normalize? Go back to Section 3. What happens if you comment out the normalization line (the `/ 255.0`)? Does the model still train? (This shows why preprocessing is so important!).\n", + "\n", + " - Why `relu`? Go back to Section 4. What happens if you change `activation='relu'` to `activation='sigmoid'`? Does it train as well?" 
"""Beginner exercises: arithmetic, FizzBuzz-style loops, list scans, and a simple class."""


def add_numbers(a, b):
    """Return the sum of a and b.

    add_numbers(-1, 4) -> 3
    """
    return a + b


def subtract_numbers(a, b):
    """Return a minus b.

    subtract_numbers(3, 5) -> -2
    """
    return a - b


def fruitloop(n):
    """Return a list for the numbers 1..n (inclusive).

    Multiples of both 3 and 5 become 'FruitLoop', multiples of 3 become
    'Fruit', multiples of 5 become 'Loop', and every other number becomes
    its decimal string.

    fruitloop(15)[-1] -> 'FruitLoop'
    """
    result = []
    for value in range(1, n + 1):
        # Divisible by 3 and 5 <=> divisible by 15; check the combined
        # case first so it isn't shadowed by the single-factor branches.
        if value % 15 == 0:
            result.append('FruitLoop')
        elif value % 3 == 0:
            result.append('Fruit')
        elif value % 5 == 0:
            result.append('Loop')
        else:
            result.append(str(value))
    return result


def fibonacci(n: int):
    """Return the n-th Fibonacci number (F(0)=0, F(1)=1).

    fibonacci(5) -> 5
    fibonacci(7) -> 13

    The original attempt was left as commented-out, non-working code;
    this is a simple O(n) iterative implementation matching the spec.
    """
    prev, curr = 0, 1
    for _ in range(n):
        prev, curr = curr, prev + curr
    return prev


def find_max(numbers: list):
    """Return the largest value in numbers.

    find_max([-1, -5, -3]) -> -1

    BUG FIX: the original popped numbers[0] off the list, mutating the
    caller's argument as a side effect. This version only reads the list.
    """
    best = numbers[0]
    for value in numbers[1:]:
        if value > best:
            best = value
    return best


def find_min(numbers: list):
    """Return the smallest value in numbers.

    find_min([-1, -5, -3]) -> -5

    BUG FIX: like find_max, the original mutated the input list via
    numbers.pop(); this version leaves the argument untouched.
    """
    best = numbers[0]
    for value in numbers[1:]:
        if value < best:
            best = value
    return best


class Person:
    """A person with a name and an age who can introduce themselves."""

    def __init__(self, name: str, age: int):
        self.name = name  # display name used in the greeting
        self.age = age    # age in years

    def greet(self):
        """Return a one-line self-introduction string."""
        # f-string interpolation stringifies age already; the original's
        # str(self.age) was redundant (output is byte-identical).
        return f'Hello, my name is {self.name} and I am {self.age} years old'