image_search.py 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776
import datetime
import io
import os
import time
import traceback
from typing import List, Optional, Tuple, Union

import faiss
import numpy as np
import torch
import torch.nn.functional as F
import torchvision.models as models
import torchvision.transforms as transforms
from PIL import Image
from pymongo import MongoClient
from scipy import ndimage
from torchvision.models import ResNet50_Weights
class ImageSearchEngine:
    """Image similarity search engine.

    Uses a pretrained ResNet50 (last FC layer removed) as a 2048-dim
    feature extractor, a FAISS ``IndexIDMap`` for vector search, and
    MongoDB to persist vectors plus the product_id <-> faiss_id mapping.
    """

    def __init__(self):
        # MongoDB connection used to persist vectors and id mappings.
        self.mongo_client = MongoClient("mongodb://root:faiss_image_search@localhost:27017/")  # MongoDB connection string
        self.mongo_db = self.mongo_client["faiss_index"]  # database name
        self.mongo_collection = self.mongo_db["mat_vectors"]  # collection name
        # Unique indexes guarantee one record per product and per vector id.
        self.mongo_collection.create_index([("product_id", 1)], unique=True)
        self.mongo_collection.create_index([("faiss_id", 1)], unique=True)
        # Counter used to hand out monotonically increasing faiss ids.
        self.faiss_id_max = 0
        # Use the GPU when available (only affects the PyTorch model).
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print(f"使用设备: {self.device}")
        # Base preprocessing pipeline shared by all feature extractors.
        self.base_transform = transforms.Compose([
            transforms.Grayscale(num_output_channels=3),  # grayscale, kept as 3 channels for ResNet
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        # Pretrained ResNet50 backbone.
        self.model = models.resnet50(weights=ResNet50_Weights.IMAGENET1K_V2)
        # Drop the final fully connected layer so the model emits pooled
        # 2048-dim features instead of class logits.
        self.model = torch.nn.Sequential(*list(self.model.children())[:-1])
        self.model = self.model.to(self.device)
        self.model.eval()
        # FAISS index (2048 is the ResNet50 feature dimension).
        self.dimension = 2048
        # IndexIDMap wraps the flat L2 index so vectors can be removed by id.
        base_index = faiss.IndexFlatL2(self.dimension)
        self.index = faiss.IndexIDMap(base_index)
        # Load any vectors already persisted in MongoDB.
        if self._load_index():
            print("成功加载现有索引")
  50. def _batch_generator(self, cursor, batch_size):
  51. """从MongoDB游标中分批生成数据"""
  52. batch = []
  53. for doc in cursor:
  54. batch.append(doc)
  55. if len(batch) == batch_size:
  56. yield batch
  57. batch = []
  58. if batch:
  59. yield batch
  60. def _process_image(self, image_path: str) -> Optional[torch.Tensor]:
  61. """处理单张图片并提取特征。
  62. Args:
  63. image_path: 图片路径
  64. Returns:
  65. 处理后的特征向量,如果处理失败返回None
  66. """
  67. try:
  68. # 读取图片
  69. image = Image.open(image_path)
  70. # 确保图片是RGB模式
  71. if image.mode != 'RGB':
  72. image = image.convert('RGB')
  73. start_ms_time = time.time()
  74. # 提取多尺度特征
  75. multi_scale_features = self._extract_multi_scale_features(image)
  76. end_ms_time = time.time()
  77. print(f"提取多尺度特征耗时: { end_ms_time - start_ms_time } s",)
  78. if multi_scale_features is None:
  79. return None
  80. start_sw_time = time.time()
  81. # 提取滑动窗口特征
  82. sliding_window_features = self._extract_sliding_window_features(image)
  83. end_sw_time = time.time()
  84. print(f"提取滑动窗口耗时: { end_sw_time - start_sw_time } s",)
  85. if sliding_window_features is None:
  86. return None
  87. # 组合特征(加权平均)
  88. combined_feature = multi_scale_features * 0.6 + sliding_window_features * 0.4
  89. # 标准化特征
  90. combined_feature = F.normalize(combined_feature, p=2, dim=0)
  91. return combined_feature
  92. except Exception as e:
  93. print(f"处理图片时出错: {e}")
  94. return None
  95. def _extract_multi_scale_features(self, image: Image.Image) -> Optional[torch.Tensor]:
  96. """基于原图分辨率的多尺度特征提取(智能动态调整版)
  97. Args:
  98. image: PIL图片对象
  99. Returns:
  100. 多尺度特征向量,处理失败返回None
  101. """
  102. try:
  103. # 获取原图信息
  104. orig_w, orig_h = image.size
  105. max_edge = max(orig_w, orig_h)
  106. aspect_ratio = orig_w / orig_h
  107. # 动态调整策略 -------------------------------------------
  108. # 策略1:根据最大边长确定基准尺寸
  109. base_size = min(max_edge, 3000) # 不超过模型支持的最大尺寸
  110. # 策略2:自动生成窗口尺寸(等比数列)
  111. min_size = 224 # 最小特征尺寸
  112. num_scales = 4 # 固定采样点数
  113. scale_factors = np.logspace(0, 1, num_scales, base=2)
  114. window_sizes = [int(base_size * f) for f in scale_factors]
  115. window_sizes = sorted({min(max(s, min_size), 3000) for s in window_sizes})
  116. # 策略3:根据长宽比调整尺寸组合
  117. if aspect_ratio > 1.5: # 宽幅图像
  118. window_sizes = [int(s*aspect_ratio) for s in window_sizes]
  119. elif aspect_ratio < 0.67: # 竖幅图像
  120. window_sizes = [int(s/aspect_ratio) for s in window_sizes]
  121. # 预处理优化 --------------------------------------------
  122. # 选择最优基准尺寸(最接近原图尺寸的2的幂次)
  123. base_size = 2 ** int(np.log2(base_size))
  124. base_transform = transforms.Compose([
  125. transforms.Resize((base_size, base_size),
  126. interpolation=transforms.InterpolationMode.LANCZOS),
  127. self.base_transform
  128. ])
  129. # 半精度加速
  130. self.model.half()
  131. img_base = base_transform(image).unsqueeze(0).to(self.device).half()
  132. # 动态特征提取 ------------------------------------------
  133. features = []
  134. for size in window_sizes:
  135. # 保持长宽比的重采样
  136. target_size = (int(size*aspect_ratio), size) if aspect_ratio > 1 else (size, int(size/aspect_ratio))
  137. # GPU加速的智能插值
  138. img_tensor = torch.nn.functional.interpolate(
  139. img_base,
  140. size=target_size,
  141. mode= 'area' if size < base_size else 'bicubic', # 下采样用area,上采样用bicubic
  142. align_corners=False
  143. )
  144. # 自适应归一化(保持原图统计特性)
  145. if hasattr(self, 'adaptive_normalize'):
  146. img_tensor = self.adaptive_normalize(img_tensor)
  147. # 混合精度推理
  148. with torch.no_grad(), torch.cuda.amp.autocast():
  149. feature = self.model(img_tensor)
  150. features.append(feature.squeeze().float())
  151. # 动态权重分配 ------------------------------------------
  152. # 基于尺寸差异的权重(尺寸越接近原图权重越高)
  153. size_diffs = [abs(size - base_size) for size in window_sizes]
  154. weights = 1 / (torch.tensor(size_diffs, device=self.device) + 1e-6)
  155. weights = weights / weights.sum()
  156. # 加权融合
  157. final_feature = torch.stack([f * w for f, w in zip(features, weights)]).sum(dim=0)
  158. return final_feature
  159. except Exception as e:
  160. print(f"智能特征提取失败: {e}")
  161. return None
  162. def _extract_multi_scale_features_bak(self, image: Image.Image) -> Optional[torch.Tensor]:
  163. """提取多尺度特征。
  164. Args:
  165. image: PIL图片对象
  166. Returns:
  167. 多尺度特征向量,如果处理失败返回None
  168. """
  169. try:
  170. features_list = []
  171. window_sizes = [256, 512,1024,1560,2048,2560,3000]
  172. # 多尺度转换 - 增加更多尺度
  173. #self.multi_scale_sizes = [224, 384, 512, 768, 1024, 1536,2048,3000]
  174. for size in window_sizes:
  175. # 调整图片大小
  176. transform = transforms.Compose([
  177. transforms.Resize((size, size), interpolation=transforms.InterpolationMode.LANCZOS),
  178. self.base_transform
  179. ])
  180. # 应用变换
  181. img_tensor = transform(image).unsqueeze(0).to(self.device)
  182. # 提取特征
  183. with torch.no_grad():
  184. feature = self.model(img_tensor)
  185. features_list.append(feature.squeeze())
  186. # 计算加权平均,较大尺度的权重更高
  187. weights = torch.linspace(1, 2, len(features_list)).to(self.device)
  188. weights = weights / weights.sum()
  189. weighted_features = torch.stack([f * w for f, w in zip(features_list, weights)])
  190. final_feature = weighted_features.sum(dim=0)
  191. return final_feature
  192. except Exception as e:
  193. print(f"提取多尺度特征时出错: {e}")
  194. return None
  195. def _extract_sliding_window_features(self, image: Image.Image) -> Optional[torch.Tensor]:
  196. """优化版滑动窗口特征提取(动态调整+批量处理)
  197. Args:
  198. image: PIL图片对象
  199. Returns:
  200. 滑动窗口特征向量,处理失败返回None
  201. """
  202. try:
  203. # 获取原图信息
  204. orig_w, orig_h = image.size
  205. aspect_ratio = orig_w / orig_h
  206. # 动态窗口配置 -------------------------------------------
  207. # 根据原图尺寸自动选择关键窗口尺寸(示例逻辑,需根据实际调整)
  208. max_dim = max(orig_w, orig_h)
  209. window_sizes = sorted({
  210. int(2 ** np.round(np.log2(max_dim * 0.1))), # 约10%尺寸
  211. int(2 ** np.floor(np.log2(max_dim * 0.5))), # 约50%尺寸
  212. int(2 ** np.ceil(np.log2(max_dim))) # 接近原图尺寸
  213. } & {256, 512, 1024, 2048, 3000}) # 与预设尺寸取交集
  214. # 智能步长调整(窗口尺寸越大步长越大)
  215. stride_ratios = {256:0.5, 512:0.4, 1024:0.3, 2048:0.2, 3000:0.15}
  216. # 预处理优化 --------------------------------------------
  217. # 生成基准图像(最大窗口尺寸)
  218. max_win_size = max(window_sizes)
  219. base_size = (int(max_win_size * aspect_ratio), max_win_size) if aspect_ratio > 1 else \
  220. (max_win_size, int(max_win_size / aspect_ratio))
  221. transform = transforms.Compose([
  222. transforms.Resize(base_size[::-1], interpolation=transforms.InterpolationMode.LANCZOS),
  223. self.base_transform
  224. ])
  225. base_img = transform(image).to(self.device)
  226. # 半精度加速
  227. self.model.half()
  228. base_img = base_img.half()
  229. # 批量特征提取 ------------------------------------------
  230. all_features = []
  231. for win_size in window_sizes:
  232. # 动态步长选择
  233. stride = int(win_size * stride_ratios.get(win_size, 0.3))
  234. # 生成窗口坐标(考虑边缘填充)
  235. h, w = base_img.shape[1:]
  236. num_h = (h - win_size) // stride + 1
  237. num_w = (w - win_size) // stride + 1
  238. # 调整窗口数量上限(防止显存溢出)
  239. MAX_WINDOWS = 32 # 根据显存调整
  240. if num_h * num_w > MAX_WINDOWS:
  241. stride = int(np.sqrt(h * w * win_size**2 / MAX_WINDOWS))
  242. num_h = (h - win_size) // stride + 1
  243. num_w = (w - win_size) // stride + 1
  244. # 批量裁剪窗口
  245. windows = []
  246. for i in range(num_h):
  247. for j in range(num_w):
  248. top = i * stride
  249. left = j * stride
  250. window = base_img[:, top:top+win_size, left:left+win_size]
  251. windows.append(window)
  252. if not windows:
  253. continue
  254. # 批量处理(自动分块防止OOM)
  255. BATCH_SIZE = 8 # 根据显存调整
  256. with torch.no_grad(), torch.cuda.amp.autocast():
  257. for i in range(0, len(windows), BATCH_SIZE):
  258. batch = torch.stack(windows[i:i+BATCH_SIZE])
  259. features = self.model(batch)
  260. all_features.append(features.cpu().float()) # 转移至CPU释放显存
  261. # 特征融合 ---------------------------------------------
  262. if not all_features:
  263. return None
  264. final_feature = torch.cat([f.view(-1, f.shape[-1]) for f in all_features], dim=0)
  265. final_feature = final_feature.mean(dim=0).to(self.device)
  266. return final_feature
  267. except Exception as e:
  268. print(f"滑动窗口特征提取失败: {e}")
  269. return None
def _extract_sliding_window_features_bak(self, image: Image.Image) -> Optional[torch.Tensor]:
    """Extract features with sliding windows (legacy implementation).

    Args:
        image: PIL image.

    Returns:
        Mean sliding-window feature vector, or None on failure.
    """
    try:
        window_sizes = [256, 512,1024,1560,2048,2560,3000]
        stride_ratio = 0.25  # stride as a fraction of the window size
        features_list = []
        for window_size in window_sizes:
            # Resize so the short edge equals window_size while keeping
            # the aspect ratio intact.
            aspect_ratio = image.size[0] / image.size[1]
            if aspect_ratio > 1:
                new_width = int(window_size * aspect_ratio)
                new_height = window_size
            else:
                new_width = window_size
                new_height = int(window_size / aspect_ratio)
            transform = transforms.Compose([
                transforms.Resize((new_height, new_width), interpolation=transforms.InterpolationMode.LANCZOS),
                self.base_transform
            ])
            # Transform the image once per scale.
            img_tensor = transform(image)
            stride = int(window_size * stride_ratio)
            # Slide a square window over the tensor and run the model on
            # each crop individually.
            for i in range(0, img_tensor.size(1) - window_size + 1, stride):
                for j in range(0, img_tensor.size(2) - window_size + 1, stride):
                    window = img_tensor[:, i:i+window_size, j:j+window_size].unsqueeze(0).to(self.device)
                    with torch.no_grad():
                        feature = self.model(window)
                    features_list.append(feature.squeeze())
        # No window fit inside the image at any scale.
        if not features_list:
            return None
        # Average all window features into a single vector.
        final_feature = torch.stack(features_list).mean(dim=0)
        return final_feature
    except Exception as e:
        print(f"提取滑动窗口特征时出错: {e}")
        return None
  314. def extract_features(self, img: Image.Image) -> np.ndarray:
  315. """结合多尺度和滑动窗口提取特征。
  316. Args:
  317. img: PIL图像对象
  318. Returns:
  319. 特征向量
  320. """
  321. try:
  322. # 提取多尺度特征
  323. multi_scale_features = self._extract_multi_scale_features(img)
  324. if multi_scale_features is None:
  325. raise ValueError("无法提取多尺度特征")
  326. # 提取滑动窗口特征
  327. sliding_window_features = self._extract_sliding_window_features(img)
  328. if sliding_window_features is None:
  329. raise ValueError("无法提取滑动窗口特征")
  330. # 组合特征
  331. combined_feature = multi_scale_features * 0.6 + sliding_window_features * 0.4
  332. # 标准化特征
  333. combined_feature = F.normalize(combined_feature, p=2, dim=0)
  334. # 转换为numpy数组
  335. return combined_feature.cpu().numpy()
  336. except Exception as e:
  337. print(f"特征提取失败: {e}")
  338. raise
  339. def add_image_from_url(self, image_path: str, product_id: str) -> bool:
  340. """从URL添加图片到索引。
  341. Args:
  342. url: 图片URL
  343. product_id: 图片对应的商品ID
  344. Returns:
  345. 添加成功返回True,失败返回False
  346. """
  347. try:
  348. # 使用原有的特征提取逻辑
  349. feature = self._process_image(image_path)
  350. if feature is None:
  351. print("无法提取特征")
  352. return False
  353. # 转换为numpy数组并添加到索引
  354. feature_np = feature.cpu().numpy().reshape(1, -1).astype('float32')
  355. idx = self.faiss_id_max + 1
  356. print(f"当前: idx { idx }")
  357. if not isinstance(idx, int) or idx <= 0:
  358. print("ID生成失败")
  359. return False
  360. self.faiss_id_max = idx
  361. # 向数据库写入记录
  362. record = {
  363. "faiss_id": idx,
  364. "product_id": product_id,
  365. "vector": feature_np.flatten().tolist(), # 将numpy数组转为列表
  366. "created_at": datetime.datetime.utcnow() # 记录创建时间
  367. }
  368. self.mongo_collection.insert_one(record)
  369. # 为向量设置ID并添加到Faiss索引
  370. self.index.add_with_ids(feature_np, np.array([idx], dtype='int64'))
  371. print(f"已添加图片: product_id: {product_id}, faiss_id: {idx}")
  372. return True
  373. except Exception as e:
  374. print(f"添加图片时出错: {e}")
  375. return False
  376. def get_product_id_by_faiss_id(self, faiss_id: int) -> Optional[str]:
  377. """根据 faiss_id 查找 MongoDB 中的 product_id。
  378. Args:
  379. faiss_id: Faiss 索引中的 ID
  380. Returns:
  381. 对应的 product_id,如果未找到则返回 None
  382. """
  383. try:
  384. faiss_id = int(faiss_id)
  385. # 检查 faiss_id 是否有效
  386. if faiss_id < 0:
  387. print(f"无效的 faiss_id: {faiss_id}")
  388. return None
  389. # 查询 MongoDB
  390. query = {"faiss_id": faiss_id}
  391. record = self.mongo_collection.find_one(query)
  392. # 检查是否找到记录
  393. if record is None:
  394. print(f"未找到 faiss_id 为 {faiss_id} 的记录")
  395. return None
  396. # 返回 product_id
  397. product_id = record.get("product_id")
  398. if product_id is None:
  399. print(f"记录中缺少 product_id 字段: {record}")
  400. return None
  401. return str(product_id) # 确保返回字符串类型
  402. except Exception as e:
  403. print(f"查询 faiss_id 为 {faiss_id} 的记录时出错: {e}")
  404. return None
  405. def search(self, image_path: str = None, top_k: int = 5) -> List[Tuple[str, float]]:
  406. try:
  407. if image_path is None:
  408. print("搜索图片下载失败!")
  409. return []
  410. feature = self._process_image(image_path)
  411. if feature is None:
  412. print("无法提取查询图片的特征")
  413. return []
  414. # 将特征转换为numpy数组
  415. feature_np = feature.cpu().numpy().reshape(1, -1).astype('float32')
  416. start_vector_time = time.time()
  417. # 搜索最相似的图片
  418. distances, indices = self.index.search(feature_np, min(top_k, self.index.ntotal))
  419. end_vector_time = time.time()
  420. print(f"搜索vector耗时: {end_vector_time - start_vector_time}")
  421. start_other_time = time.time()
  422. # 返回结果
  423. results = []
  424. for faiss_id, dist in zip(indices[0], distances[0]):
  425. if faiss_id == -1: # Faiss返回-1表示无效结果
  426. continue
  427. # 将距离转换为相似度分数(0-1之间,1表示完全相似)
  428. similarity = 1.0 / (1.0 + dist)
  429. # 根据faiss_id获取product_id
  430. print(f"搜索结果->faiss_id: { faiss_id }")
  431. product_id = self.get_product_id_by_faiss_id(faiss_id)
  432. if product_id:
  433. results.append((product_id, similarity))
  434. end_other_time = time.time()
  435. print(f"查询结果耗时: {end_other_time - start_other_time}")
  436. return results
  437. except Exception as e:
  438. print(f"搜索图片时出错: {e}")
  439. return []
def _load_index(self) -> bool:
    """Load vectors from MongoDB in batches and seed ``faiss_id_max``.

    Returns:
        True when loading succeeded (an empty database counts as
        success), False on error.
    """
    try:
        # Number of documents pulled from MongoDB per round trip.
        BATCH_SIZE = 10000
        total_docs = self.mongo_collection.count_documents({})
        if total_docs == 0:
            print("数据库为空,跳过索引加载")
            return True  # an empty database is not an error
        # Track the largest faiss_id seen so far (-1 means "none yet").
        max_faiss_id = -1
        # Stream the collection in batches.
        cursor = self.mongo_collection.find({}).batch_size(BATCH_SIZE)
        for batch in self._batch_generator(cursor, BATCH_SIZE):
            batch_vectors = []
            batch_ids = []
            current_max = -1
            for doc in batch:
                try:
                    # Skip documents whose vector has the wrong dimension
                    # or whose faiss_id is not an int.
                    if len(doc['vector']) != self.dimension:
                        continue
                    if not isinstance(doc['faiss_id'], int):
                        continue
                    faiss_id = int(doc['faiss_id'])
                    vector = doc['vector']
                    print(f"load faiss_id :{ faiss_id }")
                    # Track this batch's maximum id.
                    if faiss_id > current_max:
                        current_max = faiss_id
                    batch_vectors.append(vector)
                    batch_ids.append(faiss_id)
                except Exception as e:
                    # A malformed document must not abort the whole load.
                    print(f"文档处理异常: {str(e)}")
                    continue
            # Bulk-insert the whole batch into the FAISS index.
            if batch_vectors:
                vectors_np = np.array(batch_vectors, dtype='float32')
                ids_np = np.array(batch_ids, dtype='int64')
                self.index.add_with_ids(vectors_np, ids_np)
            # Fold the batch maximum into the global maximum.
            if current_max > max_faiss_id:
                max_faiss_id = current_max
        print(f"向量总数: {self.index.ntotal}")
        # Seed the id counter with the largest id found.
        if max_faiss_id != -1:
            new_id = max_faiss_id
            self.faiss_id_max = new_id
            print(f"ID计数器初始化完成,当前值: {new_id}")
        return True
    except Exception as e:
        print(f"索引加载失败: {str(e)}")
        return False
  497. def clear(self) -> bool:
  498. """清除所有索引和 MongoDB 中的记录。
  499. Returns:
  500. 清除成功返回 True,失败返回 False
  501. """
  502. try:
  503. # 检查索引是否支持重置操作
  504. if not hasattr(self.index, "reset"):
  505. print("当前索引不支持重置操作")
  506. return False
  507. # 重置 Faiss 索引
  508. self.index.reset()
  509. print("已清除 Faiss 索引中的所有向量")
  510. # 删除 MongoDB 中的所有记录
  511. result = self.mongo_collection.delete_many({})
  512. print(f"已从 MongoDB 中删除 {result.deleted_count} 条记录")
  513. self.faiss_id_max = 0
  514. return True
  515. except Exception as e:
  516. print(f"清除索引时出错: {e}")
  517. return False
  518. def remove_image(self, image_path: str) -> bool:
  519. """从索引中移除指定图片。
  520. Args:
  521. image_path: 要移除的图片路径
  522. Returns:
  523. 是否成功移除
  524. """
  525. try:
  526. if image_path in self.image_paths:
  527. idx = self.image_paths.index(image_path)
  528. # 创建新的索引
  529. new_index = faiss.IndexFlatL2(self.dimension)
  530. # 获取所有特征
  531. all_features = faiss.vector_to_array(self.index.get_xb()).reshape(-1, self.dimension)
  532. # 移除指定图片的特征
  533. mask = np.ones(len(self.image_paths), dtype=bool)
  534. mask[idx] = False
  535. filtered_features = all_features[mask]
  536. # 更新索引
  537. if len(filtered_features) > 0:
  538. new_index.add(filtered_features)
  539. # 更新图片路径列表
  540. self.image_paths.pop(idx)
  541. self.product_ids.pop(idx)
  542. # 更新索引
  543. self.index = new_index
  544. # 保存更改
  545. self._save_index()
  546. print(f"已移除图片: {image_path}")
  547. return True
  548. else:
  549. print(f"图片不存在: {image_path}")
  550. return False
  551. except Exception as e:
  552. print(f"移除图片时出错: {e}")
  553. return False
  554. def remove_by_product_id(self, product_id: str) -> bool:
  555. """通过 product_id 删除向量索引和数据库记录。
  556. Args:
  557. product_id: 要删除的商品 ID
  558. Returns:
  559. 删除成功返回 True,失败返回 False
  560. """
  561. try:
  562. # 检查 product_id 是否有效
  563. if not product_id or not isinstance(product_id, str):
  564. print(f"无效的 product_id: {product_id}")
  565. return False
  566. # 查询 MongoDB 获取 faiss_id
  567. query = {"product_id": product_id}
  568. record = self.mongo_collection.find_one(query)
  569. # 检查是否找到记录
  570. if record is None:
  571. print(f"未找到 product_id 为 {product_id} 的记录")
  572. return False
  573. # 提取 faiss_id
  574. faiss_id = record.get("faiss_id")
  575. if faiss_id is None:
  576. print(f"记录中缺少 faiss_id 字段: {record}")
  577. return False
  578. # 删除 Faiss 索引中的向量
  579. if isinstance(self.index, faiss.IndexIDMap):
  580. # 检查 faiss_id 是否在索引中
  581. # ids = self.index.id_map.at(1) # 获取所有 ID
  582. # if faiss_id not in ids:
  583. # print(f"faiss_id {faiss_id} 不在索引中")
  584. # return False
  585. # 删除向量
  586. self.index.remove_ids(np.array([faiss_id], dtype='int64'))
  587. print(f"已从 Faiss 索引中删除 faiss_id: {faiss_id}")
  588. else:
  589. print("当前索引不支持删除操作")
  590. return False
  591. # 删除 MongoDB 中的记录
  592. result = self.mongo_collection.delete_one({"faiss_id": faiss_id})
  593. if result.deleted_count == 1:
  594. print(f"已从 MongoDB 中删除 faiss_id: {faiss_id}")
  595. return True
  596. else:
  597. print(f"未找到 faiss_id 为 {faiss_id} 的记录")
  598. return False
  599. except Exception as e:
  600. print(f"删除 product_id 为 {product_id} 的记录时出错: {e}")
  601. traceback.print_exc()
  602. return False
  603. def get_index_size(self) -> int:
  604. """获取索引中的图片数量。
  605. Returns:
  606. 索引中的图片数量
  607. """
  608. return len(self.image_paths)