Connecting to Redis from Python and inserting URLs

#!/usr/bin/env python
# -*- coding:utf8 -*-

import redis

'''
Connecting like this opens a new connection and tears it down each time,
which wastes resources. The port defaults to 6379, so it can be omitted.
r = redis.Redis(host='127.0.0.1', port=6379, password='tianxuroot')
r.set('name', 'root')

print(r.get('name').decode('utf8'))
'''
'''
Connection pool:
when the pool is created, several connections are opened up front and kept
in the pool; when the program needs to access Redis, it borrows an idle
connection from the pool instead of opening a new one.
'''
pool = redis.ConnectionPool(host='127.0.0.1', password='helloworld')   # create a connection pool

r = redis.Redis(connection_pool=pool)
r.set('foo', 'bar')
print(r.get('foo').decode('utf8'))
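'''
A small variant (not in the original post): redis-py's ConnectionPool also
accepts decode_responses=True, so read commands return str instead of bytes
and the manual .decode('utf8') call goes away.
'''
pool2 = redis.ConnectionPool(host='127.0.0.1', password='helloworld',
                             decode_responses=True)   # return str instead of bytes
r2 = redis.Redis(connection_pool=pool2)
r2.set('foo', 'bar')
print(r2.get('foo'))   # 'bar', already decoded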

A fuller example: crawl the article list pages of python.jobbole.com, add each
article's URL to a Redis set, and follow the "next page" link until the last page.
from bs4 import BeautifulSoup
import requests
from lxml import etree
import redis


pool = redis.ConnectionPool(host='127.0.0.1', port=6379)
r = redis.Redis(connection_pool=pool)
# r = redis.Redis.from_url("redis://127.0.0.1:6379", decode_responses=True)

def get_urls(url):
    result = requests.get(url)
    selector = etree.HTML(result.text)
    # extract the link of each article entry on the list page
    links = selector.xpath(r'//*[@id="archive"]/div/div[2]/p[1]/a[1]/@href')
    for link in links:
        r.sadd("first_urlsss", link)   # SADD deduplicates repeated URLs for free
    next_url = extract_next_url(result.text)
    if next_url:
        get_urls(next_url)             # recurse into the next list page
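
# Note (an observation, not part of the original post): get_urls calls itself
# once per list page, so crawling more pages than Python's default recursion
# limit (about 1000) would raise RecursionError. The same crawl written as a
# loop avoids that; a sketch, not called below:
def get_urls_iterative(url):
    while url:
        result = requests.get(url)
        selector = etree.HTML(result.text)
        for link in selector.xpath(r'//*[@id="archive"]/div/div[2]/p[1]/a[1]/@href'):
            r.sadd("first_urlsss", link)
        url = extract_next_url(result.text)   # falsy when there is no next page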


def extract_next_url(html):
    # find the "next page" link; select_one returns None when there is no
    # next page, which ends the recursion in get_urls
    soup = BeautifulSoup(html, "lxml")
    next_link = soup.select_one('a.next.page-numbers')
    return next_link["href"] if next_link else None


if __name__ == '__main__':
    url = "http://python.jobbole.com/all-posts/"
    get_urls(url)
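
Once the crawl finishes, the collected links sit in the Redis set first_urlsss.
A minimal sketch, assuming the same local Redis instance, of how a separate
consumer could drain the set; SPOP removes and returns one member atomically,
so several workers can share the set without fetching the same URL twice.

import redis

r = redis.Redis(host='127.0.0.1', port=6379, decode_responses=True)

print(r.scard("first_urlsss"))       # number of URLs collected so far
while True:
    url = r.spop("first_urlsss")     # atomically remove and return one URL
    if url is None:                  # the set is empty: nothing left to do
        break
    print("would fetch:", url)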
