Author: 可爱的天使keven_464 | 2021-09-03 02:54
A WeChat official-account backend written in Python and hosted on Sina SAE. It implements the following features:
Send 段子 --- replies with a joke
Send 开源 + an article --- posts the message to OSChina (开源中国)
Send 快递 + a tracking number --- looks up the parcel's delivery status
Send 天气 --- returns the latest five-day weather for Nanjing
Send 微博热点 --- replies with the currently trending topics on Weibo
Send 电影 + a title --- replies with the Baidu net-disk (pan.baidu.com) links found by search
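The post lists only the request-handler class. To actually serve it on SAE with web.py you also need an entry script that maps a URL to the class and exposes a WSGI callable. The sketch below shows that wiring under the usual SAE layout; the file name index.wsgi, the module name weixin and the /weixin route are illustrative assumptions, not part of the original post.

# index.wsgi -- hypothetical SAE entry script (file/module names and route are assumptions)
import sae
import web

from weixin import WeixinInterface   # the handler class shown in the listing below

# Map the URL you configured in the WeChat admin console to the handler class
urls = ('/weixin', 'WeixinInterface')
app = web.application(urls, globals())

# SAE's Python runtime looks for a module-level WSGI callable named "application"
application = sae.create_wsgi_app(app.wsgifunc())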
The implementation code:
# -*- coding: utf-8 -*-
import hashlib
import web
import lxml
import time
import os
import urllib2,json
import urllib
import re
import random
import cookielib
from urllib import urlencode
from lxml import etree
class WeixinInterface:

    def __init__(self):
        # web.py templates live in ./templates next to this file
        self.app_root = os.path.dirname(__file__)
        self.templates_root = os.path.join(self.app_root, 'templates')
        self.render = web.template.render(self.templates_root)
    def GET(self):
        # Query parameters WeChat appends when it verifies the server URL
        data = web.input()
        signature = data.signature
        timestamp = data.timestamp
        nonce = data.nonce
        echostr = data.echostr
        # Your own token -- must match the Token configured in the WeChat admin console
        token = "weixin9047"
        # Sort token, timestamp and nonce lexicographically, then SHA-1 the concatenation
        params = [token, timestamp, nonce]
        params.sort()
        sha1 = hashlib.sha1()
        map(sha1.update, params)
        hashcode = sha1.hexdigest()
        # If the digest equals the signature, the request really comes from WeChat:
        # echo echostr back to complete the verification handshake
        if hashcode == signature:
            return echostr
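    # For normal traffic WeChat delivers each user message to this same URL as an
    # HTTP POST carrying an XML payload, and expects an XML "passive reply" within
    # a few seconds; the POST handler below parses the message and builds that reply.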
    def POST(self):
        str_xml = web.data()              # raw XML body posted by WeChat
        xml = etree.fromstring(str_xml)   # parse the XML message
        content = xml.find("Content").text        # text the user typed
        msgType = xml.find("MsgType").text         # message type (text, image, ...)
        fromUser = xml.find("FromUserName").text   # the user's OpenID
        toUser = xml.find("ToUserName").text       # the official account's ID
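        # Dispatch on the text the user sent: each branch below fetches a page with
        # urllib2, extracts the relevant bits with a regular expression, and renders
        # a text reply through the reply_text template.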
        if content == u"天气":
            # Scrape the five-day Nanjing forecast from m.ip138.com
            url = "http://m.ip138.com/21/nanjing/tianqi/"
            headers = {
                'Connection': 'Keep-Alive',
                'Accept': 'text/html, application/xhtml+xml, */*',
                'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
                'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko'}
            req = urllib2.Request(url, headers=headers)
            opener = urllib2.urlopen(req)
            html = opener.read()
            # Weather description sits in the alt text of the condition icons,
            # the temperature inside <p class="temperature">...</p>
            rex = r'(?<=img src="/image/s[0-9].gif" alt=").{1,6}(?=" />)'
            rexx = r'(?<=p class="temperature">).{5,15}(?=</p>)'
            n = re.findall(rex, html)
            m = re.findall(rexx, html)
            str_wether = ""
            for (i, j) in zip(m, n):
                str_wether = str_wether + j + " " + i + "\n"
            return self.render.reply_text(fromUser, toUser, int(time.time()), "最近五天天气:\n" + str_wether)
        elif content[0:2] == u"电影":
            # Search wangpansou.cn for Baidu net-disk links matching the requested title
            keyword = urllib.quote(content[2:].encode("utf-8"))
            url = "http://www.wangpansou.cn/s.php?q=" + keyword
            headers = {
                'Connection': 'Keep-Alive',
                'Accept': 'text/html, application/xhtml+xml, */*',
                'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
                'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko'}
            req = urllib2.Request(url, headers=headers)
            opener = urllib2.urlopen(req)
            html = opener.read()
            # Pull the pan.baidu.com share links out of the result page
            rex = r'https?://pan.baidu.com.*\?uk=[0-9]{10}.*[\d+?]"'
            m = re.findall(rex, html)
            string = u""
            for i in m:
                string = string + i + "\n"
            return self.render.reply_text(fromUser, toUser, int(time.time()), u"以下是电影链接:\n" + string)
        elif u"段子" in content:
            # Scrape jokes from qiushibaike: the front page and the 24-hour hot page
            url_8 = "http://www.qiushibaike.com/"
            url_24 = "http://www.qiushibaike.com/hot/"
            headers = {
                'Connection': 'Keep-Alive',
                'Accept': 'text/html, application/xhtml+xml, */*',
                'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
                'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko'}
            req_8 = urllib2.Request(url_8, headers=headers)
            req_24 = urllib2.Request(url_24, headers=headers)
            opener_8 = urllib2.urlopen(req_8)
            opener_24 = urllib2.urlopen(req_24)
            html_8 = opener_8.read()
            html_24 = opener_24.read()
            # Joke text sits inside <p class="content">...</p>
            rex = r'(?<=p class="content">).*?(?=</p>)'
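Each branch above replies by calling self.render.reply_text(...), i.e. by rendering a web.py template expected at templates/reply_text.xml. The original post does not include that file; the sketch below is the standard passive text-reply template such code assumes. Note the argument order in the calls: the sender's OpenID (fromUser) is passed first, so it ends up in the reply's ToUserName field.

$def with (toUser, fromUser, createTime, content)
<xml>
    <ToUserName><![CDATA[$toUser]]></ToUserName>
    <FromUserName><![CDATA[$fromUser]]></FromUserName>
    <CreateTime>$createTime</CreateTime>
    <MsgType><![CDATA[text]]></MsgType>
    <Content><![CDATA[$content]]></Content>
</xml>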