Huobutou (货捕头, formerly "一起做网店") is one of China's leading apparel-wholesale e-commerce platforms, focused on fast-moving wholesale categories such as women's clothing, footwear, and bags. Its product-search feature (referred to here as the item_search interface; the name is unofficial) is the core tool for pulling supply listings in bulk by keyword, category, price, and other filters, and it is widely used for retailer product selection, cross-seller sourcing comparison, and supply-chain analysis. Because Huobutou publishes no official API, developers must integrate through compliant page parsing or a third-party service. This article walks through item_search integration end to end: the request flow, parameter semantics, a working Python implementation, and anti-scraping countermeasures, so you can build a stable and efficient product-search pipeline.
1. Interface Basics (Core Functions and Scenarios)
2. Integration Prerequisites (Environment and Parameter Semantics)
3. Call Flow (Based on Page Parsing)
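With no official endpoint, the call flow is: build a search URL from query parameters, fetch the HTML result page with browser-like headers, and parse the product cards out of the markup. The sketch below shows a single raw request. Note that the /search path, the q / page parameter names, and the div.goods-item card selector are working assumptions carried through the rest of this article; verify each against the live site (e.g. with browser DevTools) before relying on them.

# Minimal sketch of one search request, assuming a /search endpoint with
# q / page query parameters and div.goods-item product cards; all three are
# assumptions to confirm against the live page before use.
import requests
from bs4 import BeautifulSoup

resp = requests.get(
    "https://www.huobutou.com/search",
    params={"q": "连衣裙", "page": 1},      # keyword + page; requests URL-encodes the Chinese keyword
    headers={"User-Agent": "Mozilla/5.0"},  # a browser-like UA is usually required
    timeout=10,
)
resp.raise_for_status()
soup = BeautifulSoup(resp.text, "lxml")
print(len(soup.select("div.goods-list > div.goods-item")))  # product cards found on page 1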
4. Implementation Example (Python)
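The reference implementation below ties the whole flow together in one client class: randomized headers and proxy rotation, category-name-to-ID mapping, field cleaning, per-page parsing, paginated search with randomized delays, and item_id-based deduplication. As above, every CSS selector and query-parameter name is an assumption to re-verify against the live page.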
import requests
import time
import random
import re
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from typing import Dict, List, Optional
class HuobutouSearchApi:
    def __init__(self, proxy_pool: Optional[List[str]] = None):
        self.base_url = "https://www.huobutou.com/search"
        self.ua = UserAgent()
        self.proxy_pool = proxy_pool  # proxy pool, e.g. ["http://ip:port", ...]
        self.cate_id_map = self._load_category_map()  # category name -> ID map
    def _load_category_map(self) -> Dict[str, str]:
        """Load the category-name-to-ID map (simplified version)."""
        return {
            "女装": "1001",  # women's clothing
            "男装": "1002",  # men's clothing
            "童装": "1003",  # children's clothing
            "鞋包": "1004",  # shoes & bags
        }
    def _get_headers(self) -> Dict[str, str]:
        """Build randomized request headers."""
        return {
            "User-Agent": self.ua.random,
            "Referer": "https://www.huobutou.com/",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
            "Cookie": "PHPSESSID=xxx; user_id=anonymous; Hm_lvt_xxx=xxx"  # replace with a real Cookie
        }
    def _get_proxy(self) -> Optional[Dict[str, str]]:
        """Pick a random proxy from the pool, if one is configured."""
        if self.proxy_pool:
            proxy = random.choice(self.proxy_pool)
            return {"http": proxy, "https": proxy}
        return None
    def _clean_price(self, price_str: str) -> float:
        """Normalize a price string (strip ¥, commas, etc.)."""
        if not price_str:
            return 0.0
        price_str = re.sub(r"[^\d.]", "", price_str)
        return float(price_str) if price_str else 0.0
    def _clean_buyers(self, buyers_str: str) -> int:
        """Extract the buyer count from strings such as "500+人"."""
        if not buyers_str:
            return 0
        buyers_num = re.search(r"\d+", buyers_str)
        return int(buyers_num.group()) if buyers_num else 0
    @staticmethod
    def _text(soup, selector: str) -> str:
        """Safely read the stripped text of the first node matching selector."""
        node = soup.select_one(selector)
        return node.text.strip() if node else ""

    def _parse_item(self, item_soup) -> Dict:
        """Parse one product card into a normalized dict."""
        # Extract the item ID from the detail-page link, guarding against missing nodes
        link_node = item_soup.select_one("a.goods-link")
        link = link_node["href"] if link_node else ""
        id_match = re.search(r"/goods/(\d+)\.html", link) if link else None
        img_node = item_soup.select_one(".goods-img img")
        # Assemble the core fields
        return {
            "item_id": id_match.group(1) if id_match else "",
            "title": self._text(item_soup, ".goods-title"),
            "main_image": img_node.get("src", "") if img_node else "",
            "url": f"https://www.huobutou.com{link}" if link.startswith("/") else link,
            "price": self._clean_price(self._text(item_soup, ".price")),
            "min_buy": self._text(item_soup, ".min-buy"),
            "buyers": self._clean_buyers(self._text(item_soup, ".buy-count")),
            "supplier": {
                "name": self._text(item_soup, ".supplier-name"),
                "area": self._text(item_soup, ".area")
            },
            "tags": [tag.text.strip() for tag in item_soup.select(".tag")]  # feature tags
        }
    def _parse_page(self, html: str) -> List[Dict]:
        """Parse the product list out of one result page."""
        soup = BeautifulSoup(html, "lxml")
        item_list = soup.select("div.goods-list > div.goods-item")
        return [self._parse_item(item) for item in item_list]
    def item_search(self,
                    keyword: str = "",
                    cate: str = "",
                    price_min: Optional[float] = None,
                    price_max: Optional[float] = None,
                    area: str = "",
                    min_buy: Optional[int] = None,
                    is_drop: int = 0,
                    sort: str = "default",
                    page_limit: int = 5) -> Dict:
        """
        Search Huobutou product listings.
        :param keyword: search keyword
        :param cate: category name (e.g. "女装") or category ID
        :param price_min: lower price bound (CNY)
        :param price_max: upper price bound (CNY)
        :param area: supplier region (e.g. "广州")
        :param min_buy: minimum order quantity (pieces)
        :param is_drop: drop-shipping support (1 = yes, 0 = no)
        :param sort: sort order (default / sale_desc / price_asc, etc.)
        :param page_limit: maximum number of pages to fetch (default 5)
        :return: normalized search result
        """
        try:
            # 1. Validate and preprocess parameters
            if not keyword and not cate:
                return {"success": False, "error_msg": "Provide at least one of keyword or cate"}
            # Map a category name to its ID; an ID (or empty value) passes through unchanged
            cate_id = self.cate_id_map.get(cate, cate)
            all_items = []
            current_page = 1
            while current_page <= page_limit:
                # 2. Build query parameters. Chinese values (keyword, area) are passed
                # raw: requests URL-encodes params itself, so pre-quoting them with
                # urllib would double-encode the request.
                params = {
                    "q": keyword,
                    "sort": sort,
                    "page": current_page,
                    "is_drop": is_drop
                }
                if cate_id:
                    params["cate_id"] = cate_id
                if price_min is not None:
                    params["price_min"] = price_min
                if price_max is not None:
                    params["price_max"] = price_max
                if area:
                    params["area"] = area
                if min_buy is not None:
                    params["min_buy"] = min_buy
                # 3. Send the request with a randomized delay
                time.sleep(random.uniform(4, 8))  # wholesale platforms need longer gaps: 4-8 s
                headers = self._get_headers()
                proxy = self._get_proxy()
                response = requests.get(
                    url=self.base_url,
                    params=params,
                    headers=headers,
                    proxies=proxy,
                    timeout=10
                )
                response.raise_for_status()
                items = self._parse_page(response.text)
                if not items:
                    break  # no data: stop paging
                all_items.extend(items)
                # Fewer than 30 items on a page means it was the last one
                if len(items) < 30:
                    break
                current_page += 1
            # 4. Deduplicate by item_id
            seen_ids = set()
            unique_items = []
            for item in all_items:
                if item["item_id"] not in seen_ids:
                    seen_ids.add(item["item_id"])
                    unique_items.append(item)
            return {
                "success": True,
                "total": len(unique_items),
                "page_processed": current_page - 1,
                "items": unique_items
            }
        except requests.exceptions.HTTPError as e:
            # e.response is always set for raise_for_status() errors; avoid the
            # possibly-unbound local `response` here
            status = e.response.status_code if e.response is not None else -1
            if status == 403:
                return {"success": False, "error_msg": "Anti-scraping triggered; rotate the proxy or Cookie", "code": 403}
            return {"success": False, "error_msg": f"HTTP error: {e}", "code": status}
        except Exception as e:
            return {"success": False, "error_msg": f"Search failed: {e}", "code": -1}
# Usage example
if __name__ == "__main__":
    # Proxy pool (replace with working proxies)
    PROXIES = [
        "http://123.45.67.89:8888",
        "http://98.76.54.32:8080"
    ]
    # Initialize the API client
    search_api = HuobutouSearchApi(proxy_pool=PROXIES)
    # Search for "夏季连衣裙" (summer dress) in the "女装" (women's clothing)
    # category, 50-100 CNY, Guangzhou suppliers, drop-shipping supported,
    # sorted by sales descending, at most 3 pages
    result = search_api.item_search(
        keyword="夏季连衣裙",
        cate="女装",
        price_min=50,
        price_max=100,
        area="广州",
        is_drop=1,
        sort="sale_desc",
        page_limit=3
    )
    if result["success"]:
        print(f"Search OK: {result['total']} items across {result['page_processed']} page(s)")
        for i, item in enumerate(result["items"][:5]):  # print the first 5
            print(f"\nItem {i + 1}:")
            print(f"Title: {item['title'][:50]}...")  # truncate long titles
            print(f"Price: {item['price']} CNY | MOQ: {item['min_buy']} | Buyers: {item['buyers']}+")
            print(f"Supplier: {item['supplier']['name']} ({item['supplier']['area']})")
            print(f"Tags: {','.join(item['tags'])} | Detail page: {item['url']}")
    else:
        print(f"Search failed: {result['error_msg']} (code: {result.get('code')})")