苏宁易购(聚焦家电、3C 品类的综合电商平台)的商品搜索功能(item_search接口,非官方命名)是获取品类商品列表的核心入口,数据包含实时价格、促销活动、库存状态等关键信息,对选品分析、价格监控、竞品追踪等场景具有重要价值。由于平台无公开官方 API,开发者需通过页面解析或逆向工程实现搜索对接。本文系统讲解接口逻辑、参数解析、技术实现及家电 3C 场景适配策略,助你构建稳定的苏宁商品搜索系统。
一、接口基础认知(核心功能与场景)
二、对接前置准备(参数与 URL 结构)
三、接口调用流程(基于页面解析与动态接口)
四、代码实现示例(Python)
import requests
import time
import random
import re
import urllib.parse
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from typing import List, Dict
class SuningSearchApi:
    """Scraper client for Suning.com (苏宁易购) product search result pages.

    Suning exposes no official public search API, so this client fetches the
    HTML search pages directly and parses the product list with BeautifulSoup.
    NOTE(review): CSS selectors below were taken from one snapshot of the site
    and must be re-verified against the live page structure.
    """

    def __init__(self, proxy_pool: List[str] = None, cookie: str = "", city_id: str = "110100"):
        """
        :param proxy_pool: optional proxy list, e.g. ["http://ip:port", ...]
        :param cookie: logged-in cookie string; should contain ``cityId``
        :param city_id: Suning city id (default "110100" = Beijing); prices and
            stock are region-dependent, so this must match the cookie
        """
        # Keyword is URL-path encoded and substituted into the path, not a query param.
        self.base_url = "https://search.suning.com/{keyword}/"
        self.ua = UserAgent()
        self.proxy_pool = proxy_pool  # e.g. ["http://ip:port", ...]
        self.cookie = cookie          # login cookie, needs cityId
        self.city_id = city_id        # region id (default Beijing 110100)
        # Category-name -> category-id mapping (abridged).
        self.category_map = {
            "智能手机": "1005263",
            "空调": "1005204",
            "笔记本电脑": "1005259"
        }
        # Brand-name -> brand-id mapping (abridged).
        self.brand_map = {
            "华为": "7016",
            "小米": "10676",
            "TCL": "7068"
        }

    @staticmethod
    def _sel_text(soup, selector: str) -> str:
        """Return the stripped text of the first node matching *selector*, or ""."""
        node = soup.select_one(selector)
        return node.text.strip() if node is not None else ""

    @staticmethod
    def _sel_attr(soup, selector: str, attr: str) -> str:
        """Return attribute *attr* of the first node matching *selector*, or ""."""
        node = soup.select_one(selector)
        if node is None:
            return ""
        return node.get(attr) or ""

    def _get_headers(self) -> Dict[str, str]:
        """Build randomized request headers, injecting the city id into the cookie."""
        headers = {
            "User-Agent": self.ua.random,
            "Referer": "https://www.suning.com/",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
        }
        if self.cookie:
            # Make sure the cookie carries cityId so region-specific prices/stock apply.
            if "cityId=" not in self.cookie:
                self.cookie += f"; cityId={self.city_id}"
            headers["Cookie"] = self.cookie
        return headers

    def _get_proxy(self) -> Dict[str, str]:
        """Pick a random proxy from the pool, or None when no pool is configured."""
        if self.proxy_pool and len(self.proxy_pool) > 0:
            proxy = random.choice(self.proxy_pool)
            return {"http": proxy, "https": proxy}
        return None

    def _clean_price(self, price_str: str) -> float:
        """Parse a price string ("¥3,299.00" etc.) into a float; 0.0 on failure."""
        if not price_str:
            return 0.0
        # Drop everything except digits and the decimal point (¥, commas, text).
        price_str = re.sub(r"[^\d.]", "", price_str)
        return float(price_str) if price_str else 0.0

    def _clean_review(self, review_str: str) -> str:
        """Extract the percentage token (e.g. "98%") from a review string."""
        if not review_str:
            return ""
        rate = re.search(r"\d+%", review_str)
        return rate.group() if rate else ""

    def _parse_item(self, item_soup) -> Dict:
        """Parse one product <li> into the normalized result dict.

        Original code used JavaScript-style ``?.`` optional chaining, which is
        not valid Python; ``_sel_text``/``_sel_attr`` provide the same
        missing-node tolerance.
        """
        product_id = item_soup.get("data-pid") or ""
        # Promotion badges; a product can carry several.
        promotion_tags = [tag.text.strip() for tag in item_soup.select(".tag-promotion")]
        review_str = self._sel_text(item_soup, ".evaluate")
        stock_status = self._sel_text(item_soup, ".stockState")
        sale_str = self._sel_text(item_soup, ".def-price")
        member_str = self._sel_text(item_soup, ".member-price")
        return {
            "product_id": product_id,
            "title": self._sel_text(item_soup, ".title-selling-point"),
            "main_image": self._sel_attr(item_soup, ".img-block img", "src"),
            "url": self._sel_attr(item_soup, ".img-block a", "href"),
            "price": {
                "sale": self._clean_price(sale_str),
                "sale_str": sale_str,
                "member": self._clean_price(member_str),
                "member_str": member_str
            },
            "promotion": {
                "tags": promotion_tags,
                "delivery": self._sel_text(item_soup, ".deliver-info")
            },
            "stock": {
                "status": stock_status,
                # "有货" literally means "in stock" in the status text.
                "available": "有货" in stock_status
            },
            "brand": {
                "name": self._sel_text(item_soup, ".brand-name")
            },
            "review": {
                "rate": self._clean_review(review_str),
                "review_str": review_str
            }
        }

    def _parse_page(self, html: str) -> List[Dict]:
        """Parse all products on one search result page."""
        soup = BeautifulSoup(html, "lxml")
        # Product list container (selector must track the live page structure).
        item_list = soup.select("ul.general li.product-item")
        # Items without a data-pid are ads/placeholders; skip them.
        return [self._parse_item(item) for item in item_list if item.get("data-pid")]

    def _get_total_pages(self, html: str) -> int:
        """Read the total page count from the pagination box; 1 when absent."""
        soup = BeautifulSoup(html, "lxml")
        page_box = soup.select_one(".page-box")
        if not page_box:
            return 1
        anchors = page_box.select("a")
        if not anchors:  # guard: pagination box present but empty
            return 1
        last_page = anchors[-1].get("data-page") or "1"
        return int(last_page) if last_page.isdigit() else 1

    def item_search(self,
                    keyword: str = "",
                    category: str = "",
                    price_low: float = None,
                    price_high: float = None,
                    brand: str = "",
                    promotion: int = 0,
                    sort: str = "",
                    page_limit: int = 5) -> Dict:
        """
        Search Suning product listings.

        :param keyword: search keyword
        :param category: category name (e.g. "智能手机") or raw category id
        :param price_low: minimum sale price (CNY)
        :param price_high: maximum sale price (CNY)
        :param brand: brand name (e.g. "华为") or raw brand id
        :param promotion: promotion type (1=满减, 2=以旧换新, 0=all)
        :param sort: sort key (salecount/price, ...)
        :param page_limit: max pages to crawl (default 5)
        :return: normalized result dict; ``success`` flags errors
        """
        try:
            # 1. Parameter preprocessing: need at least one of keyword/category.
            if not keyword and not category:
                return {"success": False, "error_msg": "关键词(keyword)和分类(category)至少需提供一个"}
            # Map category/brand names to ids; pass raw ids through unchanged.
            cat_id = self.category_map.get(category, category) if category else ""
            brand_id = self.brand_map.get(brand, "") if brand else ""
            # Suning embeds the (URL-encoded) keyword in the path; "*" means "any".
            encoded_keyword = urllib.parse.quote(keyword, encoding="utf-8") if keyword else ""
            encoded_keyword = encoded_keyword or "*"

            all_items = []
            current_page = 1
            total_pages = page_limit
            while current_page <= page_limit:
                params = {
                    "page": current_page,
                    "cityId": self.city_id  # region parameter
                }
                if cat_id:
                    params["catId"] = cat_id
                if price_low is not None:
                    params["priceLow"] = price_low
                if price_high is not None:
                    params["priceHigh"] = price_high
                if brand_id:
                    params["brandId"] = brand_id
                if promotion in (0, 1, 2, 3):
                    params["promotion"] = promotion
                if sort:
                    params["sort"] = sort

                url = self.base_url.format(keyword=encoded_keyword)
                # Random delay to stay under anti-bot rate limits.
                time.sleep(random.uniform(2, 4))
                response = requests.get(
                    url=url,
                    params=params,
                    headers=self._get_headers(),
                    proxies=self._get_proxy(),
                    timeout=10
                )
                response.raise_for_status()
                html = response.text

                items = self._parse_page(html)
                if not items:
                    break  # no data -> stop paginating
                all_items.extend(items)

                # Total page count only needs to be read from the first page.
                if current_page == 1:
                    total_pages = self._get_total_pages(html)
                    # Clamp to page_limit and the site's hard cap of 50 pages.
                    total_pages = min(total_pages, page_limit, 50)
                if current_page >= total_pages:
                    break  # reached the last page
                current_page += 1

            # De-duplicate by product_id (a product can appear on several pages).
            seen_ids = set()
            unique_items = []
            for item in all_items:
                if item["product_id"] not in seen_ids:
                    seen_ids.add(item["product_id"])
                    unique_items.append(item)

            return {
                "success": True,
                "total": len(unique_items),
                "page_processed": current_page,
                "items": unique_items,
                "city_id": self.city_id  # region the data corresponds to
            }
        except requests.exceptions.HTTPError as e:
            # Use the response attached to the exception -- the local `response`
            # variable may be unbound if the request itself failed.
            status = e.response.status_code if e.response is not None else -1
            if status == 403:
                return {"success": False, "error_msg": "触发反爬,建议更换代理或Cookie", "code": 403}
            if status == 401:
                return {"success": False, "error_msg": "Cookie无效,请重新登录获取", "code": 401}
            return {"success": False, "error_msg": f"HTTP错误: {str(e)}", "code": status}
        except Exception as e:
            return {"success": False, "error_msg": f"搜索失败: {str(e)}", "code": -1}
# Usage example
if __name__ == "__main__":
    # Proxy pool -- replace with working proxies before running.
    proxy_servers = [
        "http://123.45.67.89:8888",
        "http://98.76.54.32:8080"
    ]
    # Logged-in cookie captured from a browser session; must contain cityId.
    login_cookie = "userid=xxx; sessionId=xxx; cityId=110100; areaId=110101"  # Beijing
    # Build the client, pinned to Beijing (city id 110100).
    client = SuningSearchApi(proxy_pool=proxy_servers, cookie=login_cookie, city_id="110100")
    # Search "华为手机" within the 智能手机 category, 3000-5000 CNY, trade-in
    # promotion, sorted by sales volume, crawling at most 3 pages.
    outcome = client.item_search(
        keyword="华为手机",
        category="智能手机",
        price_low=3000,
        price_high=5000,
        brand="华为",
        promotion=2,
        sort="salecount",
        page_limit=3
    )
    if not outcome["success"]:
        print(f"搜索失败:{outcome['error_msg']}(错误码:{outcome.get('code')})")
    else:
        print(f"搜索成功:共找到 {outcome['total']} 件商品,处理 {outcome['page_processed']} 页(区域:{outcome['city_id']})")
        for idx, product in enumerate(outcome["items"][:5]):  # show the first five hits
            print(f"\n商品 {idx+1}:")
            print(f"标题:{product['title'][:50]}...")  # long titles are truncated
            print(f"价格:{product['price']['sale_str']} | 会员价:{product['price']['member_str'] or '无'}")
            print(f"促销:{', '.join(product['promotion']['tags'])} | 配送:{product['promotion']['delivery']}")
            print(f"库存:{product['stock']['status']} | {'可购' if product['stock']['available'] else '无货'}")
            print(f"评价:{product['review']['rate']} | {product['review']['review_str']}")
            print(f"品牌:{product['brand']['name']}")
            print(f"详情页:{product['url']}")