-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path
s1.py
More file actions
155 lines (132 loc) · 5.38 KB
/
s1.py
File metadata and controls
155 lines (132 loc) · 5.38 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
import time
import json
import csv
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
# ========== Scraper Function ==========
def scrape_daraz_product(driver, product_url):
    """Scrape a single Daraz product page and return its details.

    Parameters
    ----------
    driver : selenium.webdriver
        An already-running WebDriver instance.
    product_url : str
        URL of the product page to load.

    Returns
    -------
    dict
        Keys: product_name, brand, discounted_price, listed_price,
        discount_percent, warranty_info, availability, seller_name,
        average_rating, rating_count, image_url, product_url,
        product_description, reviews (JSON-encoded list of dicts).
    """
    driver.get(product_url)
    time.sleep(3)  # crude wait for the page to render; TODO: use WebDriverWait

    def safe_get_text(by, selector, default="N/A"):
        """Return stripped text of the first matching element, or *default*."""
        try:
            return driver.find_element(by, selector).text.strip()
        except NoSuchElementException:
            return default

    def safe_get_element(by, selector):
        """Return the first matching element, or None if absent."""
        try:
            return driver.find_element(by, selector)
        except NoSuchElementException:
            return None

    # --- Main product details -------------------------------------------
    product_name = safe_get_text(By.CLASS_NAME, "pdp-mod-product-badge-title")
    brand = safe_get_text(By.CLASS_NAME, "pdp-product-brand__brand-link")
    discounted_price = safe_get_text(By.CLASS_NAME, "pdp-price_type_normal")
    # No strikethrough price on the page means no discount: fall back to the
    # discounted price so listed_price is never "N/A".
    listed_price = safe_get_text(By.CLASS_NAME, "pdp-price_type_deleted", default=discounted_price)
    discount_percent = safe_get_text(By.CLASS_NAME, "pdp-product-price__discount", default="0%")
    warranty_info = safe_get_text(By.CLASS_NAME, "delivery-option-item__title", "No warranty info")
    seller_name = safe_get_text(By.CLASS_NAME, "seller-name__detail-name", "Daraz")
    average_rating = safe_get_text(By.CLASS_NAME, "score-average", "No rating")

    # Rating count text looks like "123 Ratings"; keep only the number.
    # Guard against whitespace-only text, where .split() yields [] and
    # indexing [0] would raise IndexError.
    rating_parts = safe_get_text(By.CLASS_NAME, "count", "0 Ratings").split()
    rating_count = rating_parts[0] if rating_parts else "0"

    # The page may redirect; record the URL we actually ended up on.
    # (Kept in a separate name instead of shadowing the parameter.)
    final_url = driver.current_url

    # Image URL
    image_url = ""
    image_element = safe_get_element(By.CSS_SELECTOR, ".gallery-preview-panel__image img")
    if image_element:
        image_url = image_element.get_attribute("src")

    # Product description: join all non-empty highlight paragraphs.
    try:
        desc_block = driver.find_element(By.CLASS_NAME, "pdp-product-highlights")
        paragraphs = desc_block.find_elements(By.TAG_NAME, "p")
        texts = [p.text.strip() for p in paragraphs]
        product_description = "\n".join(t for t in texts if t)
    except NoSuchElementException:
        # Was a bare `except:`; narrowed so Ctrl-C still interrupts the run.
        product_description = "Description not found or failed to parse."

    # Scroll to the bottom so the lazy-loaded review section renders.
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    time.sleep(3)

    def _review_text(elem, selector, default="N/A"):
        """Stripped text of the first match inside *elem*, or *default*."""
        try:
            return elem.find_element(By.CSS_SELECTOR, selector).text.strip()
        except NoSuchElementException:
            return default

    # --- Reviews ---------------------------------------------------------
    reviews = []
    for review in driver.find_elements(By.CSS_SELECTOR, "div.item"):
        # Star rating = number of filled-star icons in the review widget.
        stars = review.find_elements(By.CSS_SELECTOR, "img[src*='lazadaicon_star_fill']")
        reviews.append({
            "reviewer": _review_text(review, ".top span"),
            "rating": str(len(stars)),
            "date": _review_text(review, ".title.right"),
            "comment": _review_text(review, ".content"),
        })

    return {
        "product_name": product_name,
        "brand": brand,
        "discounted_price": discounted_price,
        "listed_price": listed_price,
        "discount_percent": discount_percent,
        "warranty_info": warranty_info,
        "availability": "N/A",
        "seller_name": seller_name,
        "average_rating": average_rating,
        "rating_count": rating_count,
        "image_url": image_url,
        "product_url": final_url,
        "product_description": product_description.strip().replace("\n", " "),
        "reviews": json.dumps(reviews, ensure_ascii=False),
    }
# ========== Load Product Links ==========
# Read product URLs from the first column of TV_links.csv.
# newline="" is the documented way to open files for the csv module.
product_urls = []
with open("TV_links.csv", "r", encoding="utf-8", newline="") as f:
    reader = csv.reader(f)
    next(reader, None)  # skip header; default avoids StopIteration on an empty file
    for row in reader:
        # Keep only rows whose first cell looks like a URL (http/https).
        if row and row[0].startswith("http"):
            product_urls.append(row[0])
print(f"📦 Total product links to scrape: {len(product_urls)}")
# ========== Setup Selenium ==========
# Launch Chrome headless with an explicit chromedriver binary path.
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_service = Service(
    executable_path=r"D:/5th trimester/Data Wrangling/Wrangling Project/chromedriver-win64/chromedriver-win64/chromedriver.exe"
)
driver = webdriver.Chrome(service=chrome_service, options=chrome_options)
# ========== Scrape and Save ==========
# Write one CSV row per product. The browser is now closed via try/finally:
# previously an exception escaping the `with` block skipped driver.quit(),
# leaking the headless Chrome process.
fieldnames = [
    "product_name", "brand", "discounted_price", "listed_price", "discount_percent",
    "warranty_info", "availability", "seller_name", "average_rating", "rating_count",
    "image_url", "product_url", "product_description", "reviews"
]
try:
    with open("TV_daraz_products.csv", mode="w", newline="", encoding="utf-8") as file:
        writer = csv.DictWriter(file, fieldnames=fieldnames)
        writer.writeheader()
        for i, url in enumerate(product_urls, 1):
            print(f"\n🔎 Scraping {i}/{len(product_urls)}: {url}")
            try:
                writer.writerow(scrape_daraz_product(driver, url))
            except Exception as e:
                # Log and keep going: one bad product page must not abort the run.
                print(f"❌ Failed to scrape {url}: {e}")
finally:
    driver.quit()  # always release the browser, even if the loop raised
print("\n✅ Done! All product data saved to TV_daraz_products.csv")