义乌购(聚焦小商品批发的 B2B 电商平台)的商品搜索功能(item_search接口,非官方命名)是获取小商品批发商品列表的核心入口,数据包含批发价、起订量、供应商资质、商位信息等 B2B 关键字段,对采购决策、供应商筛选、市场调研等场景具有重要价值。由于平台无公开官方 API,开发者需通过页面解析实现搜索对接。本文系统讲解接口逻辑、参数解析、技术实现及批发场景适配策略,助你构建稳定的义乌购商品列表获取系统。
一、接口基础认知(核心功能与场景)
二、对接前置准备(参数与 URL 结构)
三、接口调用流程(基于页面解析)
四、代码实现示例(Python)
import requests
import time
import random
import re
import urllib.parse
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from typing import List, Dict
class YiwugouSearchApi:
    """HTML-parsing search client for yiwugou.com (no official public API).

    All product data is scraped from the search result pages, so every CSS
    selector below must be kept in sync with the live page structure.
    A login cookie is required for the complete price / MOQ fields.
    """

    def __init__(self, proxy_pool: List[str] = None, cookie: str = ""):
        """
        :param proxy_pool: optional proxy URLs, e.g. ["http://ip:port", ...]
        :param cookie: logged-in Cookie header value (unlocks full wholesale
                       price and minimum-order-quantity data)
        """
        self.base_url = "https://www.yiwugou.com/search/"
        self.ua = UserAgent()
        self.proxy_pool = proxy_pool
        self.cookie = cookie
        # Simplified category-name -> category-id mapping.
        self.category_map = {
            "玩具-塑料玩具": "100120_305",
            "饰品-耳环": "100230_412",
            "家居-收纳盒": "100340_521"
        }
        # Market districts of Yiwu International Trade City.
        self.market_map = {
            "一区": 1,
            "二区": 2,
            "三区": 3,
            "四区": 4,
            "五区": 5
        }

    def _get_headers(self) -> Dict[str, str]:
        """Build request headers with a randomized User-Agent."""
        headers = {
            "User-Agent": self.ua.random,
            "Referer": "https://www.yiwugou.com/category/",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
        }
        if self.cookie:
            headers["Cookie"] = self.cookie
        return headers

    def _get_proxy(self) -> Dict[str, str]:
        """Pick a random proxy from the pool; return None when no pool is set."""
        if self.proxy_pool:
            proxy = random.choice(self.proxy_pool)
            return {"http": proxy, "https": proxy}
        return None

    @staticmethod
    def _sel_text(soup, selector: str) -> str:
        """Return the stripped text of the first node matching *selector*, or ""."""
        node = soup.select_one(selector)
        return node.text.strip() if node else ""

    def _clean_price(self, price_str: str) -> float:
        """Normalize a wholesale price string ("¥2.5元/件" -> 2.5)."""
        if not price_str:
            return 0.0
        price_str = re.sub(r"[^\d.]", "", price_str)
        return float(price_str) if price_str else 0.0

    def _clean_quantity(self, quantity_str: str) -> int:
        """Extract the first integer of an MOQ string ("50件起批" -> 50)."""
        if not quantity_str:
            return 0
        quantity_num = re.search(r"\d+", quantity_str)
        return int(quantity_num.group()) if quantity_num else 0

    def _clean_trade_count(self, trade_str: str) -> int:
        """Extract the integer 30-day sales count from its display string."""
        if not trade_str:
            return 0
        trade_num = re.search(r"\d+", trade_str)
        return int(trade_num.group()) if trade_num else 0

    def _parse_item(self, item_soup) -> Dict:
        """Parse one product card into a normalized dict.

        Fixes vs. the original: the JavaScript-style "?." optional chaining
        (a SyntaxError in Python) is replaced by _sel_text()/explicit None
        checks, and a non-matching detail link no longer raises
        AttributeError on .group(1).
        """
        # Product id from the detail-page link.
        link_node = item_soup.select_one("a.product-title")
        link = (link_node.get("href") or "") if link_node else ""
        id_match = re.search(r"/product/detail/(\d+)\.html", link) if link else None
        item_id = id_match.group(1) if id_match else ""
        # Stall location, e.g. "一区12号门2楼3456商位" -> area/door/floor/stall.
        location = self._sel_text(item_soup, ".shop-location")
        location_info = {"full": location}
        if "号门" in location and "楼" in location:
            area_match = re.search(r"(一|二|三|四|五)区", location)
            door_match = re.search(r"(\d+)号门", location)
            floor_match = re.search(r"(\d+)楼", location)
            stall_match = re.search(r"(\d+)商位", location)
            if area_match:
                location_info["area"] = area_match.group()
            if door_match:
                location_info["door"] = door_match.group()
            if floor_match:
                location_info["floor"] = floor_match.group()
            if stall_match:
                location_info["stall"] = stall_match.group()
        img_node = item_soup.select_one(".product-img img")
        price_str = self._sel_text(item_soup, ".price-wholesale")
        moq_str = self._sel_text(item_soup, ".moq")
        return {
            "item_id": item_id,
            "title": self._sel_text(item_soup, ".product-title"),
            "main_image": (img_node.get("src") or "") if img_node else "",
            "url": f"https://www.yiwugou.com{link}" if link.startswith("/") else link,
            "price": {
                "wholesale": self._clean_price(price_str),
                "price_str": price_str  # raw price text (may contain tier pricing)
            },
            "trade": {
                "moq": self._clean_quantity(moq_str),
                "moq_str": moq_str,  # raw MOQ text
                "mix_batch": "支持混批" in self._sel_text(item_soup, ".mix-tag"),
                "trade_count": self._clean_trade_count(self._sel_text(item_soup, ".trade-count"))
            },
            "supplier": {
                "name": self._sel_text(item_soup, ".shop-name"),
                "location": location_info,
                "credit": self._sel_text(item_soup, ".credit-level")
            }
        }

    def _parse_page(self, html: str) -> List[Dict]:
        """Parse all product cards on one result page."""
        soup = BeautifulSoup(html, "lxml")
        # Card container selector; adjust if the page structure changes.
        item_list = soup.select("div.product-item")
        return [self._parse_item(item) for item in item_list if item]

    def _get_total_pages(self, html: str) -> int:
        """Read the total page count from the pagination bar (default 1)."""
        soup = BeautifulSoup(html, "lxml")
        page_box = soup.select_one(".pagination")
        if not page_box:
            return 1
        links = page_box.select("a")
        if not links:
            # Pagination bar present but empty; original indexed [-1] and
            # would raise IndexError here.
            return 1
        last_page = links[-1].text.strip()
        return int(last_page) if last_page.isdigit() else 1

    def item_search(self,
                    keywords: str = "",
                    category: str = "",
                    price_min: float = None,
                    price_max: float = None,
                    quantity_min: int = None,
                    quantity_max: int = None,
                    market: str = "",
                    mix_batch: int = 0,
                    sort: str = "",
                    page_limit: int = 5) -> Dict:
        """
        搜索义乌购商品列表 (search the Yiwugou product listing).

        :param keywords: search keyword
        :param category: category name (e.g. "玩具-塑料玩具") or a raw category id
        :param price_min: minimum wholesale price (CNY)
        :param price_max: maximum wholesale price (CNY)
        :param quantity_min: minimum MOQ (pieces)
        :param quantity_max: maximum MOQ (pieces)
        :param market: supplier district ("一区" ... "五区")
        :param mix_batch: 1 = mixed-batch-capable only, 0 = all
        :param sort: sort key (volume_desc / price_asc, ...)
        :param page_limit: maximum pages to fetch (default 5)
        :return: dict with success flag, total, page_processed, items
        """
        try:
            # 1. Parameter validation / normalization.
            if not keywords and not category:
                return {"success": False, "error_msg": "关键词(keywords)和分类(category)至少需提供一个"}
            # Map a known category name to its id; otherwise assume the
            # caller already passed an id (or "").
            cat_id = self.category_map.get(category, category)
            market_id = self.market_map.get(market, "") if market else ""
            all_items = []
            total_pages = page_limit  # refined after the first page is parsed
            current_page = 1
            while current_page <= page_limit:
                params = {"page": current_page}
                if keywords:
                    # Pass the raw text: requests percent-encodes params
                    # itself, so pre-quoting (as the original did) would
                    # double-encode the keyword.
                    params["keywords"] = keywords
                if cat_id:
                    params["cat_id"] = cat_id
                if price_min is not None:
                    params["price_min"] = price_min
                if price_max is not None:
                    params["price_max"] = price_max
                if quantity_min is not None:
                    params["quantity_min"] = quantity_min
                if quantity_max is not None:
                    params["quantity_max"] = quantity_max
                if market_id:
                    params["market"] = market_id
                if mix_batch in (0, 1):
                    params["mix_batch"] = mix_batch
                if sort:
                    params["sort"] = sort
                # Random delay: the site's anti-bot checks are strict.
                time.sleep(random.uniform(3, 5))
                response = requests.get(
                    url=self.base_url,
                    params=params,
                    headers=self._get_headers(),
                    proxies=self._get_proxy(),
                    timeout=10
                )
                response.raise_for_status()
                html = response.text
                items = self._parse_page(html)
                if not items:
                    break  # no data on this page -> stop paging
                all_items.extend(items)
                # Total page count only needs to be read once.
                if current_page == 1:
                    total_pages = min(self._get_total_pages(html), page_limit, 100)
                if current_page >= total_pages:
                    break
                current_page += 1
            # De-duplicate by item_id while preserving order.
            seen_ids = set()
            unique_items = []
            for item in all_items:
                if item["item_id"] not in seen_ids:
                    seen_ids.add(item["item_id"])
                    unique_items.append(item)
            return {
                "success": True,
                "total": len(unique_items),
                "page_processed": current_page,
                "items": unique_items
            }
        except requests.exceptions.HTTPError as e:
            # Read the status from the exception itself; the original
            # referenced a possibly-unbound local `response` and sniffed
            # "403" out of str(e).
            status = e.response.status_code if e.response is not None else -1
            if status == 403:
                return {"success": False, "error_msg": "触发反爬,建议更换代理或Cookie", "code": 403}
            if status == 401:
                return {"success": False, "error_msg": "需要登录,请提供有效Cookie", "code": 401}
            return {"success": False, "error_msg": f"HTTP错误: {str(e)}", "code": status}
        except Exception as e:
            return {"success": False, "error_msg": f"搜索失败: {str(e)}", "code": -1}
# Usage example
if __name__ == "__main__":
    # Replace with working proxies before running.
    proxy_servers = [
        "http://123.45.67.89:8888",
        "http://98.76.54.32:8080"
    ]
    # Logged-in cookie copied from a browser session (unlocks full prices).
    login_cookie = "PHPSESSID=xxx; user_id=xxx"
    client = YiwugouSearchApi(proxy_pool=proxy_servers, cookie=login_cookie)
    # Search "塑料玩具" in category 玩具-塑料玩具: price 1-5 CNY, MOQ 50-500,
    # mixed-batch only, sorted by sales volume desc, at most 3 pages.
    query = {
        "keywords": "塑料玩具",
        "category": "玩具-塑料玩具",
        "price_min": 1,
        "price_max": 5,
        "quantity_min": 50,
        "quantity_max": 500,
        "mix_batch": 1,
        "sort": "volume_desc",
        "page_limit": 3,
    }
    result = client.item_search(**query)
    if not result["success"]:
        print(f"搜索失败:{result['error_msg']}(错误码:{result.get('code')})")
    else:
        print(f"搜索成功:共找到 {result['total']} 件商品,处理 {result['page_processed']} 页")
        # Show the first five hits only.
        for idx, item in enumerate(result["items"][:5]):
            print(f"\n商品 {idx+1}:")
            print(f"标题:{item['title'][:50]}...")  # truncate long titles
            print(f"价格:{item['price']['price_str']} | 起订量:{item['trade']['moq_str']}")
            print(f"交易:30天成交{item['trade']['trade_count']}件 | 混批:{'是' if item['trade']['mix_batch'] else '否'}")
            print(f"供应商:{item['supplier']['name']} | 诚信等级:{item['supplier']['credit']}")
            print(f"商位:{item['supplier']['location']['full']}")
            print(f"详情页:{item['url']}")