
Web scraping: connecting to databases, two ways to fetch page content, BeautifulSoup, regular expressions, and crawling JSON files


I. Connecting to a database

1. Connecting to MySQL

import pymysql

conn = pymysql.connect(host='localhost', port=3306, user='root', passwd='***', db='***', charset='utf8')
cur = conn.cursor()
# The following two steps close the cursor and the database connection; both are required!
cur.close()
conn.close()
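As a quick sanity check, here is a minimal sketch (a hypothetical test query, not part of the original) that can be run between connecting and closing:

cur.execute("SELECT VERSION()")   # any cheap query works as a connectivity check
print(cur.fetchone())             # e.g. ('5.7.26',)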

Inserting data into MySQL

         try:cur.execute("USE ssm_crud")cur.execute('SET NAMES utf8')cur.execute('SET CHARACTER SET utf8')cur.execute('SET character_set_connection=utf8')cur.execute('insert into GYZC_XCUL(索引号,信息分类,发布机构,发文日期,文号,是否有效,信息名称) values(%s,%s,%s,%s,%s,%s,%s)',[list[0], list[1], list[2], list[3], list[4], list[5], list[6]])except Exception as e:print(e)finally:pass

 

2. Connecting to Oracle

import cx_Oracle  # Oracle connection library

conn = cx_Oracle.connect('username', 'password', 'localhost:1521/service_name')  # connect to the database
cur = conn.cursor()  # get a cursor
cur.close()
conn.close()

Inserting data into Oracle, converting data types along the way

   try:sql = "insert into swzc(uuid,xmbh,bdmc,gpjg,gpqsrq,gpjzrq,gpggq,bmjzsj,fbmtmc,bdszd,wqlb)values( '"+str(ncount)+"' ,'" +nlist_td[1].encode('GBK', 'ignore').decode('GBk') + "','" + nlist_td[0].encode('GBK', 'ignore').decode('GBk') + "','" + nlist_td[2].encode('GBK', 'ignore').decode('GBk') + "',to_date('"+nlist_td[3]+"','yyyy/mm/dd HH:MI:SS'),to_date('"+nlist_td[4]+"','yyyy/mm/dd HH:MI:SS'),'" + nlist_td[5].encode('GBK', 'ignore').decode('GBk') + "','" + nlist_td[6].encode('GBK', 'ignore').decode('GBk') + "','" + nlist_td[7].encode('GBK', 'ignore').decode('GBk') + "','" + nlist_td[8].encode('GBK', 'ignore').decode('GBk') + "','" +nlist_td[9].encode('GBK', 'ignore').decode('GBk')+ "')"test="to_date('"+nlist_td[3]+"','yyyy/mm/dd HH:MI:SS')"print(test)cur.execute(sql)except Exception as e:print(e)finally:pass

 

II. Fetching page content

1. Using the request module from the urllib library

from urllib.request import urlopen
import urllib.request

headers = ("User-Agent", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36")
opener = urllib.request.build_opener()
opener.addheaders = [headers]
# Install the opener globally
urllib.request.install_opener(opener)
thisurl = "http://gzw.guizhou.gov.cn/zwgk/xxgkml/zdlyxx/gzyy/index.json"
data = urllib.request.urlopen(thisurl).read().decode("utf-8","ignore")
#print(data)
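If only a single request is needed, the same User-Agent can also be passed per request through urllib.request.Request instead of installing a global opener; a minimal sketch:

import urllib.request

req = urllib.request.Request(
    "http://gzw.guizhou.gov.cn/zwgk/xxgkml/zdlyxx/gzyy/index.json",
    headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"},
)
# Same decode call as above; errors are ignored to survive stray bytes
data = urllib.request.urlopen(req).read().decode("utf-8", "ignore")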

2. Using the requests library

import requests

headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'}
r = requests.get('http://www.prechina.net/channels/304.html', headers=headers)
r.encoding = 'utf-8'
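A small follow-up sketch (assuming the request above succeeded) showing how the decoded text is then read:

print(r.status_code)   # 200 on success
html = r.text          # decoded using the encoding set above
print(html[:200])      # peek at the first 200 characters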

III. Extracting the required content and parsing the page structure

1. The BeautifulSoup approach: it automatically filters out the page's HTML tags and extracts only the text, which saves a lot of tedious tag-filtering work

from bs4 import BeautifulSoup

bs = BeautifulSoup(r.content.decode("utf-8"), "lxml")
tables = bs.find_all('table', attrs={'class': 'tables_01'})
# The sub-pages being crawled contain two tables, but data from the first table is not
# needed every time, so the tables are kept in a list: on the first page both tables
# are traversed, on the other pages only the first one.
if j == 1:
    table = [tables[1], tables[0]]
else:
    table = [tables[0]]
for m in range(0, len(table)):
    for tr in table[m].findAll("tr"):
        list_td = []
        # Only the link in the first td needs to be inserted
        i = 0
        for td in tr.findAll("td"):
            list_td.append(td.getText())
            # Two columns in this page contain links, but only one is needed,
            # so i checks whether this is the first td of a new tr
            if i == 0:
                href = td.find('a')['href']
                list_td.append(href)
            i = i + 1
        print(list_td)
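To make the "only the text" point above concrete, here is a self-contained sketch on a made-up HTML fragment (not taken from the target site):

from bs4 import BeautifulSoup

html = '<table class="tables_01"><tr><td><a href="/x.html">项目A</a></td><td>100万</td></tr></table>'
soup = BeautifulSoup(html, "lxml")
for td in soup.find_all("td"):
    print(td.get_text())        # tag-free text: 项目A, then 100万
print(soup.find("a")["href"])   # attribute values are still accessible: /x.html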

2. Extracting the required content with regular expressions

import re
from bs4 import BeautifulSoup
import requests

tables = bs.find_all('table', attrs={'class': 'tables_01'})
if j == 1:
    table = [tables[1], tables[0]]
else:
    table = [tables[0]]
for m in range(0, len(table)):
    for tr in table[m].findAll("tr"):
        list_td = []
        i = 0
        for td in tr.findAll("td"):
            if i == 0:
                href = td.find('a')['href']   # link in the first td only
            i = i + 1
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'}
        rn = requests.get("http://www.prechina.net" + href, headers=headers)
        print("http://www.prechina.net" + href)
        # print(ncount)
        bsn = BeautifulSoup(rn.content.decode("utf-8", "ignore"), "lxml")
        if len(bsn.text) > 1000:
            tablesn = bsn.find_all('table', attrs={'class': 'tables_02'})
            table03 = tablesn[2]
            table01 = tablesn[0]
            nlist_td = []  # only the link in the first td is needed
            pat = '项目编号</th>.*?<td>(.*?)</td>'
            rst = re.compile(pat, re.S).findall(table01.__str__())
            if len(re.compile('资产描述</td>.*?<td colspan=".*?">(.*?)</td>', re.S).findall(table03.__str__())) < 1:
                pat1 = '资产现状</td>.*?<td colspan=".*?">(.*?)</td>'
            else:
                pat1 = '资产描述</td>.*?<td colspan=".*?">(.*?)</td>'
            rst1 = re.compile(pat1, re.S).findall(table03.__str__())
            if len(re.compile('标的所在地</td>.*?<td colspan=".*?">(.*?)</td>', re.S).findall(table03.__str__())) < 1:
                pat2 = '地理位置</td>.*?<td colspan=".*?">(.*?)</td>'
            else:
                pat2 = '标的所在地</td>.*?<td colspan=".*?">(.*?)</td>'
            rst2 = re.compile(pat2, re.S).findall(table03.__str__())
            pat3 = '权证编号</td>.*?<td colspan=".*?">(.*?)</td>'
            rst3 = re.compile(pat3, re.S).findall(table03.__str__())
            if len(rst3) < 1:
                rst3 = ['机械设备及其他类无权证编号']
            pat4 = '建筑面积</td>.*?<td colspan=".*?">(.*?)</td>'
            rst4 = re.compile(pat4, re.S).findall(table03.__str__())
            if len(rst4) < 1:
                rst4 = ['机械设备及其他类无建筑面积']
            pat5 = '抵押、租赁等他项权利情况</td>.*?<td colspan=".*?">(.*?)</td>'
            rst5 = re.compile(pat5, re.S).findall(table03.__str__())
            pat6 = '权利人是否行使优先受让权 </td>.*?<td colspan=".*?">(.*?)</td>'
            rst6 = re.compile(pat6, re.S).findall(table03.__str__())
            pat7 = '评估机构</td>.*?<td colspan=".*?">(.*?)</td>'
            rst7 = re.compile(pat7, re.S).findall(table03.__str__())
            pat8 = '核准(备案)机构</td>.*?<td colspan=".*?">(.*?)</td>'
            rst8 = re.compile(pat8, re.S).findall(table03.__str__())
            pat9 = '核准(备案)日期</td>.*?<td colspan=".*?">(.*?)</td>'
            rst9 = re.compile(pat9, re.S).findall(table03.__str__())
            pat10 = '评估基准日</td>.*?<td colspan=".*?">(.*?)</td>'
            rst10 = re.compile(pat10, re.S).findall(table03.__str__())
            mtd = table03.find_all('td', attrs={'colspan': '2', 'class': 'th2'})
            if len(mtd) < 1:
                mtd = table03.find_all('td', attrs={'colspan': '4', 'class': 'th2'})
            if len(mtd[0].text) < 1:
                rst11 = " "
            else:
                rst11 = mtd[0].text
            mtd1 = table03.find_all('td', attrs={'colspan': '3', 'class': 'th2'})
            if len(mtd1) < 1:
                mtd1 = table03.find_all('td', attrs={'colspan': '5', 'class': 'th2'})
            if len(mtd1[0].text) < 1:
                rst12 = " "
            else:
                rst12 = mtd1[0].text
            pat13 = '转让标的对应评估值</td>.*?<td colspan=".*?">(.*?)</td>'
            rst13 = re.compile(pat13, re.S).findall(table03.__str__())
            zyxx = table03.find_all('tr')
            zyxx01 = zyxx[len(zyxx) - 1].find_all('td')
            rst14 = zyxx01[0].text
            nlist_td.extend(rst)
            nlist_td.extend(rst1)
            nlist_td.extend(rst2)
            nlist_td.extend(rst3)
            nlist_td.extend(rst4)
            nlist_td.extend(rst5)
            nlist_td.extend(rst6)
            nlist_td.extend(rst7)
            nlist_td.extend(rst8)
            nlist_td.extend(rst9)
            nlist_td.extend(rst10)
            nlist_td.append(rst11)
            nlist_td.append(rst12)
            nlist_td.extend(rst13)
            nlist_td.append(rst14)
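The patterns above all combine non-greedy matching (.*?) with re.S, which lets . match newlines as well; a minimal sketch on a made-up fragment:

import re

fragment = '<th>项目编号</th>\n<td>GZ-2019-001</td>'
pat = '项目编号</th>.*?<td>(.*?)</td>'
print(re.compile(pat, re.S).findall(fragment))  # ['GZ-2019-001']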

Finally, an example of crawling a JSON file; it adds the extra step of converting the JSON string into a Python dict.

# json parsing library, analogous to lxml
import json
# jsonpath query syntax, analogous to XPath
import jsonpath
from urllib.request import urlopen
import pymysql
import urllib.request
import requests
from bs4 import BeautifulSoup
import re
# Header format: header = ("User-Agent", the actual user-agent value)
conn = pymysql.connect(host='localhost', port=3306, user='root', passwd='***', db='***', charset='utf8')
cur = conn.cursor()
headers = ("User-Agent","Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36")
opener = urllib.request.build_opener()
opener.addheaders = [headers]
# data = opener.open(url).read().decode("utf-8","ignore")
# Install the opener globally
urllib.request.install_opener(opener)
thisurl = "http://gzw.guizhou.gov.cn/zwgk/xxgkml/zdlyxx/gzyy/index.json"
data = urllib.request.urlopen(thisurl).read().decode("utf-8","ignore")
#print(data)
# Convert the JSON string into a Python dict
json_dic = json.loads(data)
syh = jsonpath.jsonpath(json_dic, "$..idxID")
title = jsonpath.jsonpath(json_dic, "$..Title")
PubDate = jsonpath.jsonpath(json_dic, "$..PubDate")
fileNum = jsonpath.jsonpath(json_dic, "$..fileNum")
url = jsonpath.jsonpath(json_dic, "$..url")
node_num=len(syh)
for i in range(0, node_num):
    if title[i].find('薪酬') != -1:
        print(syh[i], title[i], PubDate[i], fileNum[i], url[i])
        urlSec = 'http://gzw.guizhou.gov.cn/zwgk/xxgkml/zdlyxx/gzyy/' + url[i]
        # headers = {
        #     'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'
        # }
        dataSec = urllib.request.urlopen(urlSec).read()
        dataSec = dataSec.decode("utf-8", "ignore")
        # bs = BeautifulSoup(dataSec.content.decode("utf-8"), "lxml")
        # data = bs.find("div", {"class": "xxgk_xl_top"})
        # print(data)
        # for li in bs.find("div", {"class": "xxgk_xl_top"}).findAll("li"):
        list = []
        pat1 = '索引号:</a>.*?<span>(.*?)</span>'
        rst1 = re.compile(pat1, re.S).findall(dataSec)  # works
        pat2 = '信息分类:</a>.*?<span>(.*?)</span>'
        rst2 = re.compile(pat2, re.S).findall(dataSec)
        pat3 = '发布机构:</a>.*?var str_1 = "(.*?)";'  # write a regex like this for every field
        rst3 = re.compile(pat3, re.S).findall(dataSec)
        pat4 = '发文日期:</a>.*? var str_1 = "(.*?)";'
        rst4 = re.compile(pat4, re.S).findall(dataSec)
        pat5 = '文号:</a>.*?<span>(.*?)</span>'
        rst5 = re.compile(pat5, re.S).findall(dataSec)
        pat6 = "是否有效:</a>.*?isok='(.*?)';"  # 1 or empty means valid, 2 means invalid
        rst6 = re.compile(pat6, re.S).findall(dataSec)
        rst6 = rst6[0] if rst6 else ''  # findall returns a list; take the first match before comparing
        if rst6 == '' or rst6 == '1':
            rst6 = '是'
        elif rst6 == '2':
            rst6 = '否'
        pat7 = '信息名称:</a>.*?<span>(.*?)</span>'
        rst7 = re.compile(pat7, re.S).findall(dataSec)
        list.append(rst1)
        list.append(rst2)
        list.append(rst3)
        list.append(rst4)
        list.append(rst5)
        list.append(rst6)
        list.append(rst7)
        print(list)
        if list.__len__() > 6:
            try:
                cur.execute("USE ssm_crud")
                cur.execute('SET NAMES utf8')
                cur.execute('SET CHARACTER SET utf8')
                cur.execute('SET character_set_connection=utf8')
                cur.execute('insert into GYZC_XCUL(索引号,信息分类,发布机构,发文日期,文号,是否有效,信息名称) values(%s,%s,%s,%s,%s,%s,%s)',
                            [list[0], list[1], list[2], list[3], list[4], list[5], list[6]])
            except Exception as e:
                print(e)
            finally:
                pass
conn.commit()
# The following two steps close the cursor and the database connection; both are required!
cur.close()
conn.close()
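To isolate the "convert the JSON string into a Python dict" step from the full example above, here is a minimal sketch on a made-up JSON string:

import json
import jsonpath

# Made-up JSON string, used only to show the conversion and query steps
raw = '{"docs": [{"Title": "某公司负责人薪酬公示", "PubDate": "2019-07-01"}]}'
doc = json.loads(raw)                         # str -> dict
titles = jsonpath.jsonpath(doc, "$..Title")   # ['某公司负责人薪酬公示']
dates = jsonpath.jsonpath(doc, "$..PubDate")  # ['2019-07-01']
print(titles, dates)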