欢迎访问 生活随笔!

生活随笔

当前位置: 首页 > 编程语言 > python >内容正文

python

Python爬取网上车市[http://www.cheshi.com/]的数据

发布时间:2024/3/12 python 60 豆豆
生活随笔 收集整理的这篇文章主要介绍了 Python爬取网上车市[http://www.cheshi.com/]的数据 小编觉得挺不错的,现在分享给大家,帮大家做个参考。
#coding:utf8 #爬取网上车市[http://www.cheshi.com/]的数据 import requests, json, time, re, os, sys, time,urllib2,shutil,string import threading import MySQLdb import redis from pyquery import PyQuery as pq from urlparse import urljoin from selenium import webdriver#设置utf-8编码格式 reload(sys) sys.setdefaultencoding( "utf-8" )#读取文件内容 def getLines(filename):file_object = open(filename,'rb')lines = file_object.readlines()return lines#根据url_name获取url_type_id def get_url_type_id(v_url_name):#3000 品牌 奥迪#4000 奥迪 奥迪A6url_type_id = ''for line in getLines('/home/shutong/crawl/car/script/brand.ini'):line = line.strip()url_cate = line.split(',')[1]url_name = line.split(',')[2]if v_url_name.strip() == url_name.strip():url_type_id = line.split(',')[0]return url_type_idbreakelse :continuereturn url_type_idclass ResultData():'''数据文件类'''def __init__(self,industry_id,url_type_id,url_name,url_value,web_type_id,web_name,date_id):self.industry_id = industry_idself.url_type_id = url_type_idself.url_name = url_nameself.url_value = url_valueself.web_type_id = web_type_idself.web_name = web_nameself.date_id = date_iddef __str__(self):return self.industry_id,self.url_type_id,self.url_name,self.url_value,self.web_type_id,self.self.web_name,ResultData.date_idclass Base(object):'''文件保存的基类'''def __init__(self,dev_prd_flag):self.dev_prd_flag = dev_prd_flagpass #print "This is init function"#保存数据到文件文件def _saveContext(self,filename,*name):format = '^'context = name[0]for i in name[1:]:context = context + format + str(i)context = str(context).replace('','(').replace('',')').replace('',',').replace('',':')if self.dev_prd_flag != 'prd':print contextelse:#去除文件路径名首位空格filename = filename.strip()#读取目录名称path = os.path.dirname(filename)#如果目录不存在则创建目录if not os.path.exists(path):os.makedirs(path) #读取文件名称,以追加的方式写文件name = os.path.basename(filename)fp = open(filename,'a')fp.write(context+'\n')fp.close()def saveData(self,filename,ResultData):if 
ResultData.url_type_id:self._saveContext(filename,ResultData.industry_id,ResultData.url_type_id,ResultData.url_name,ResultData.url_value,ResultData.web_type_id,ResultData.web_name,ResultData.date_id)else:#将数据进行保存在redis中r = redis.Redis(host='192.168.122.140',port=6379,db=0)r.sadd('errorList',ResultData.industry_id+'^'+ResultData.url_name+'^'+ResultData.url_value)def __str__(self):return '保存文件的基类'class Crawl(Base):'''爬虫基础类'''driver = None#构造方法def __init__(self,name,dev_prd_flag):super(Crawl,self).__init__(dev_prd_flag='dev')self.dev_prd_flag = dev_prd_flagself.name = name#self.driver = init_driver()'''初始化启动浏览器'''def init_driver(self):ua = "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.3 Safari/537.36"cap = webdriver.DesiredCapabilities.PHANTOMJScap["phantomjs.page.settings.resourceTimeout"] = 20000cap["phantomjs.page.settings.loadImages"] = Truecap["phantomjs.page.settings.disk-cache"] = Truecap["phantomjs.page.settings.userAgent"] = uacap["phantomjs.page.customHeaders.User-Agent"] =uacap["phantomjs.page.customHeaders.Referer"] = "http://tj.ac.10086.cn/login/"driver = webdriver.PhantomJS(executable_path='/home/shutong/phantomjs/bin/phantomjs',desired_capabilities=cap, service_args=['--ignore-ssl-errors=true'])driver.set_page_load_timeout(60) driver.set_script_timeout(60)#return driverself.driver = driver#获取网页文本def getHtml(self,url,code='utf-8'):html = ''try:if self.driver:self.driver.get(url)html = self.driver.page_sourceelse :headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'} request = urllib2.Request(url,headers=headers)response = urllib2.urlopen(request,data=None,timeout=60)if code:if code == 'gbk':html = unicode(response.read(),'gbk')else:html = unicode(response.read(),str(code))except:passfinally:return html '''析构方法'''def __del__(self):if self.driver:self.driver.quit()print "浏览器成功关闭"else:print "浏览器未打开使用"def __str__(self):return "爬虫基础类"def start_crawl(url):#连接redis数据库r 
= redis.Redis(host='192.168.122.140',port=6379,db=0)urllist = []html = crawl.getHtml(url,'gbk')d = pq(html)for a in d('a'):a = pq(a)try:url_value = urljoin(url,a.attr('href'))name = a.text()#if re.compile(r'([a-z]+) ([a-z]+)', re.I)#http://newcar.xcar.com.cn/162/if re.match( r'http://newcar.xcar.com.cn/[0-9]{1,10}/$', url_value, re.M|re.I):#print url_value,name#urllist.append(url_value)#将数据存储在redis中#r.sadd('urllist',url_value)pass elif re.match(r'http://newcar.xcar.com.cn/m[0-9]{1,10}/$',url_value,re.M|re.I):r.sadd('urllist',url_value)except:pass #for index in urllist:for index in list(set(urllist)):print indextry:#return start_crawl(index)pass except:pass def start_wscs_crawl(url):#生产或者测试标志 dev为测试 prd为生产flag = 'prd'#汽车行业IDindustry_id = '004004'#移动PC端web_type_idweb_type_id = '0'#网站名称web_name = '网上车市'crawl = Crawl('网上车市',flag)#加载浏览器#crawl.init_driver()html = crawl.getHtml(url)d = pq(html)for div in d('div').filter('.list-box'):div = pq(div)#品牌brand = div('div').filter('.lb').find('span').text()#品牌urlbrand_url = urljoin(url,div('div').filter('.lb')('a').attr('href'))#print brand,brand_urlurl_type_id = '3000'url_name = brandurl_value = brand_url#保存品牌数据#crawl._saveContext(filename,industry_id,url_type_id,url_name,url_value,web_type_id,web_name)resultData = ResultData(industry_id,url_type_id,url_name,url_value,web_type_id,web_name,date_id)crawl.saveData(filename,resultData)brand = div('div').filter('.rb')('dl')('dt')('a').text().replace('>>','')brand_url = urljoin(url,div('div').filter('.rb')('dl')('dt')('a').attr('href'))#print brand,brand_urlurl_type_id = '3000'url_name = brandurl_value = brand_url#保存品牌数据 resultData = ResultData(industry_id,url_type_id,url_name,url_value,web_type_id,web_name,date_id)crawl.saveData(filename,resultData)#crawl._saveContext(filename,industry_id,url_type_id,url_name,url_value,web_type_id,web_name)for dd in div('div').filter('.rb')('dl')('dd'):dd = pq(dd)car_name = dd('div').filter('.con')('h4').text()car_url = 
urljoin(url,dd('div').filter('.con')('h4')('a').attr('href'))#print car_name,car_urlurl_type_id = get_url_type_id(car_name)url_name = car_nameurl_value = car_url #保存车系数据#crawl._saveContext(filename,industry_id,url_type_id,url_name,url_value,web_type_id,web_name) resultData = ResultData(industry_id,url_type_id,url_name,url_value,web_type_id,web_name,date_id)crawl.saveData(filename,resultData)#制作汽车实体信息#品牌 子品牌 车系名称 价位 图片url 网站名称 #多线程启动 def start_mutli_crawl():list = []for word in string.uppercase:#url = 'http://www.autohome.com.cn/grade/carhtml/%s.html' %(word)url = 'http://product.cheshi.com/static/selectcar/%s.html?t=1519713137030' % (word)list.append(url)#定义线程数组threads = []#创建线程for i in range(len(list)):t = threading.Thread(target=start_wscs_crawl,args=(list[i],))threads.append(t)#开启线程for i in range(len(list)):threads[i].start()for i in range(len(list)):threads[i].join()#filename = '/home/shutong/crawl/car/script/wscs.csv' #date_id = '20180227' date_id = sys.argv[1] filename = sys.argv[2] #url = 'http://product.cheshi.com/static/selectcar/B.html?t=1519713137030' #start_wscs_crawl(url) #多线程启动 start_mutli_crawl()

 

转载于:https://www.cnblogs.com/Jims2016/p/8554928.html

总结

以上是生活随笔为你收集整理的Python爬取网上车市[http://www.cheshi.com/]的数据的全部内容,希望文章能够帮你解决所遇到的问题。

如果觉得生活随笔网站内容还不错,欢迎将生活随笔推荐给好友。