
Keyword Mining with Python: Scraping Keyword Research Data, an Essential SEO Tool

The script below queries the Chinaz webmaster keyword tool (stool.chinaz.com) for a seed keyword, pages through the Baidu keyword-mining results, extracts each related keyword's overall, PC, mobile, and 360 search indexes, indexed-page count, estimated traffic, and top-ranking page URL and title, and saves everything to an Excel file.


# Keyword mining via the Chinaz webmaster tool
# -*- coding: utf-8 -*-
import requests
from lxml import etree
import re
import xlwt
import time

headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/534.55.3 (KHTML, like Gecko) Version/5.1.3 Safari/534.53.10'
}

# Check whether the keyword returns any related-keyword results
def search_keyword(keyword):
    data = {
        'kw': keyword,
        'page': '1',
        'by': '0',
    }
    url = "http://stool.chinaz.com/baidu/words.aspx"
    html = requests.post(url, data=data, headers=headers).text
    time.sleep(3)
    # print(html)
    con = etree.HTML(html)
    key_result = con.xpath('//div[@class="col-red lh30 fz14 tc"]/text()')
    try:
        key_result = key_result[0]  # the "no related keywords found" notice, if the page shows one
    except IndexError:
        key_result = []
    # print(key_result)
    return key_result

# Get the number of result pages and the record count for the keyword
def get_page_number(keyword):
    data = {
        'kw': keyword,
        'page': '1',
        'by': '0',
    }
    url = "http://stool.chinaz.com/baidu/words.aspx"
    html = requests.post(url, data=data, headers=headers).text
    time.sleep(3)
    # print(html)
    con = etree.HTML(html)
    page_num = con.xpath('//span[@class="col-gray02"]/text()')
    page_numberze = r'共(.+?)页'  # matches "共N页" (N pages in total)
    page_number = re.findall(page_numberze, page_num[0], re.S)
    page_number = page_number[0]
    # print(page_number)
    total_data = con.xpath('//p[@class="col-gray lh24 fr pr5"]')  # record-count node
    total_datas = total_data[0].xpath('string(.)')  # all text inside the node
    # print(total_datas)
    print(f'挖掘关键词:{keyword}-{total_datas}')
    return page_number

# Scrape the keyword-mining data, page by page
def get_keyword_datas(keyword, page_number):
    datas_list = []
    for i in range(1, page_number + 1):
        print(f'正在采集第{i}页关键词挖掘数据...')
        data = {
            'kw': keyword,
            'page': i,
            'by': '0',
        }
        # print(data)
        url = "http://stool.chinaz.com/baidu/words.aspx"
        html = requests.post(url, data=data, headers=headers).text
        time.sleep(3)
        # print(html)
        con = etree.HTML(html)
        key_words = con.xpath('//p[@class="midImg"]/a/span/text()')  # keywords
        # print(key_words)
        keyword_all_datas = []
        keyword_datas = con.xpath('//ul[@class="ResultListWrap "]/li/div[@class="w8-0"]/a')
        for keyword_data in keyword_datas:
            keyword_data = keyword_data.text
            if keyword_data is not None:
                keyword_all_datas.append(keyword_data)
        # print(keyword_all_datas)
        # The index cells repeat in groups of five per keyword, so slice by stride
        overall_indexs = keyword_all_datas[0::5]  # overall index
        # print(overall_indexs)
        pc_indexs = keyword_all_datas[1::5]  # PC index
        # print(pc_indexs)
        mobile_indexs = keyword_all_datas[2::5]  # mobile index
        # print(mobile_indexs)
        s360_indexs = keyword_all_datas[3::5]  # 360 index
        # print(s360_indexs)
        collections = keyword_all_datas[4::5]  # indexed-page count
        # print(collections)
        ips = con.xpath('//ul[@class="ResultListWrap "]/li/div[@class="w15-0 kwtop"]/text()')  # estimated traffic
        if ips == []:
            ips = ['--']
        # print(ips)
        first_place_hrefs = con.xpath(
            '//ul[@class="ResultListWrap "]/li/div[@class="w18-0 lh24 tl"]/a/text()')  # top-ranking page URL
        if first_place_hrefs == []:
            first_place_hrefs = con.xpath('//ul[@class="ResultListWrap "]/li/div[@class="w18-0 lh24 tl"]/text()')
        # print(first_place_hrefs)
        first_place_titles = con.xpath(
            '//ul[@class="ResultListWrap "]/li/div[@class="w18-0 lh24 tl"]/p[@class="lh17 pb5"]/text()')  # top-ranking page title
        if first_place_titles == []:
            first_place_titles = ['--']
        # print(first_place_titles)
        data_list = []
        for key_word, overall_index, pc_index, mobile_index, s360_index, collection, ip, first_place_href, first_place_title in zip(
                key_words, overall_indexs, pc_indexs, mobile_indexs, s360_indexs, collections, ips, first_place_hrefs,
                first_place_titles
        ):
            data = [
                key_word,
                overall_index,
                pc_index,
                mobile_index,
                s360_index,
                collection,
                ip,
                first_place_href,
                first_place_title,
            ]
            print(data)
            print('\n')
            data_list.append(data)
            time.sleep(3)
        datas_list.extend(data_list)  # merge this page's rows into the overall list
    return datas_list

# Save the keyword data as an Excel (.xls) file
def bcsj(keyword, data):
    workbook = xlwt.Workbook(encoding='utf-8')
    booksheet = workbook.add_sheet('Sheet 1', cell_overwrite_ok=True)
    # Header order matches the row layout built in get_keyword_datas()
    title = [['关键词', '整体指数', 'PC指数', '移动指数', '360指数', '收录量', '预估流量(ip)', '网站首位链接', '网站首位标题']]
    title.extend(data)
    # print(title)
    for i, row in enumerate(title):
        for j, col in enumerate(row):
            booksheet.write(i, j, col)
    workbook.save(f'{keyword}.xls')
    print(f"保存关键词数据为 {keyword}.xls 成功!")

if __name__ == '__main__':
    keyword = input('请输入关键词>>')
    print('正在查询,请稍后...')
    result = search_keyword(keyword)
    if result == "没有找到相关的关键字":
        print('\n')
        print(result)
        print("该关键词没有挖掘到关键词数据")
    else:
        print('\n')
        page_number = get_page_number(keyword)
        print('\n')
        print('正在采集关键词挖掘数据,请稍后...')
        print('\n')
        page_number = int(page_number)
        datas_list = get_keyword_datas(keyword, page_number)
        print('\n')
        print('关键词挖掘数据采集结果:')
        print('========================采集结果========================\n\n')
        for datas in datas_list:
            print(datas)
        print('\n\n========================采集结束========================\n')
        bcsj(keyword, datas_list)
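
Run the script from a terminal; it prompts for a seed keyword and writes <keyword>.xls to the working directory. Two optional refinements follow, sketched under stated assumptions rather than taken from the original code. First, every page fetch above is a bare requests.post with no timeout; a hypothetical helper (fetch_html is not part of the original script) could add a timeout and a few retries so one dropped connection does not abort a long crawl:

import time
import requests

# Hypothetical helper, not in the original script: POST with a timeout and
# simple retries. The three scraping functions could call this instead of
# calling requests.post(...) directly.
def fetch_html(url, data, headers, retries=3, delay=3):
    for attempt in range(1, retries + 1):
        try:
            resp = requests.post(url, data=data, headers=headers, timeout=10)
            resp.raise_for_status()
            return resp.text
        except requests.RequestException as exc:
            print(f'Request failed ({attempt}/{retries}): {exc}')
            time.sleep(delay)
    raise RuntimeError('All retries failed')

Second, xlwt only writes the legacy .xls format (capped at 65,536 rows); if a plain CSV is acceptable, the same rows can be written with the standard library. This sketch assumes the row layout produced by get_keyword_datas():

import csv

# Alternative sketch: write the scraped rows to CSV instead of .xls.
def save_csv(keyword, data):
    header = ['关键词', '整体指数', 'PC指数', '移动指数', '360指数',
              '收录量', '预估流量(ip)', '网站首位链接', '网站首位标题']
    # utf-8-sig keeps the Chinese headers readable when the CSV is opened in Excel
    with open(f'{keyword}.csv', 'w', newline='', encoding='utf-8-sig') as f:
        writer = csv.writer(f)
        writer.writerow(header)
        writer.writerows(data)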
