
use aiohttp instead of requests

yzhh 2020-08-12 16:01:00 +08:00
parent e5ebeb90e2
commit ce8e3d5da5
19 changed files with 139 additions and 322 deletions

View file

@@ -7,7 +7,7 @@ from UTC8 import UTC8
async def ab():
url = 'https://minecraft-zh.gamepedia.com/api.php?action=query&list=abuselog&aflprop=user|title|action|result|filter|timestamp&format=json'
async with aiohttp.ClientSession() as session:
async with session.get(url) as req:
async with session.get(url,timeout=aiohttp.ClientTimeout(total=5)) as req:
if req.status != 200:
return f"请求发生时错误:{req.status}"
else:

View file

@@ -1,24 +1,8 @@
from .bugtracker import bug
from .bugtrackerbc import bugcb
from .bugtrackergc import buggc
import re
async def bugtracker(name):
try:
if name.find(" -h") != -1:
return('''~bug <JiraID> - 从Mojira中获取此Bug的信息。''')
elif name.find(" -g") != -1:
name = re.sub(' -g','',name)
q = re.match(r'^bug (.*\-.*)', name)
return (await buggc(q.group(1)))
elif name.find(" -b") != -1:
name = re.sub(' -b', '', name)
q = re.match(r'^bug (.*\-.*)', name)
return (await bugcb(q.group(1)))
else:
try:
q = re.match(r'^bug (.*)\-(.*)', name)
return(await bug(q.group(1)+'-'+q.group(2)))
except Exception:
return ('未知语法,请使用~bug -h获取帮助。')
q = re.match(r'^bug (.*)\-(.*)', name)
return(await bug(q.group(1)+'-'+q.group(2)))
except Exception as e:
return (str(e))

View file

@@ -1,22 +1,21 @@
# -*- coding:utf-8 -*-
import requests
from xml.etree import ElementTree
import json
import string
import os, sys
import aiohttp
async def bug(pagename):
try:
try:
os.remove('bug_cache_text.txt')
except Exception:
pass
url_str ='https://bugs.mojang.com/si/jira.issueviews:issue-xml/'+ str.upper(pagename) + '/' + str.upper(pagename) + '.xml'
respose_str = requests.get(url_str,timeout=10)
respose_str = requests.get(url_str,timeout=10)
async with aiohttp.ClientSession() as session:
async with session.get(url_str,timeout=aiohttp.ClientTimeout(total=20)) as req:
if req.status != 200:
return f"请求发生时错误:{req.status}"
else:
respose_str = await req.text()
respose_str = await req.text()
try:
respose_str.encoding = 'utf-8'
root = ElementTree.XML(respose_str.text)
root = ElementTree.XML(respose_str)
for node in root.iter("channel"):
for node in root.iter("item"):
Title = node.find("title").text
@@ -26,8 +25,13 @@ async def bug(pagename):
Resolution = "Resolution: " + node.find("resolution").text
Link = node.find("link").text
url_json = 'https://bugs.mojang.com/rest/api/2/issue/'+str.upper(pagename)
json_text = requests.get(url_json,timeout=10)
file = json.loads(json_text.text)
async with aiohttp.ClientSession() as session2:
async with session2.get(url_json,timeout=aiohttp.ClientTimeout(total=5)) as reqjson:
if reqjson.status != 200:
return f"请求发生时错误:{reqjson.status}"
else:
json_text = await reqjson.text()
file = json.loads(json_text)
Versions = file['fields']['versions']
name = []
for item in Versions[:]:

View file

@@ -1,81 +0,0 @@
# -*- coding:utf-8 -*-
import requests
from xml.etree import ElementTree
import json
import string
import os, sys
import http.client
import hashlib
import urllib
import random
async def bugcb(pagename):
appid = '20200328000407172'
secretKey = '9wUEKfwOtQsMh_2Ozr7R'
httpClient = None
myurl = '/api/trans/vip/translate'
fromLang = 'en' #原文语种
toLang = 'zh'
salt = random.randint(32768, 65536)
try:
try:
try:
os.remove('bug_cache_text.txt')
except Exception:
pass
url_str ='https://bugs.mojang.com/si/jira.issueviews:issue-xml/'+ str.upper(pagename) + '/' + str.upper(pagename) + '.xml'
respose_str = requests.get(url_str,timeout=10)
respose_str = requests.get(url_str,timeout=10)
try:
respose_str.encoding = 'utf-8'
root = ElementTree.XML(respose_str.text)
for node in root.iter("channel"):
for node in root.iter("item"):
Title = node.find("title").text
q = node.find("title").text
Type = "类型:" + node.find("type").text
Project = "项目:" + node.find("project").text
TStatus = "进度:" + node.find("status").text
Resolution = "状态:" + node.find("resolution").text
Link = node.find("link").text
sign = appid + q + str(salt) + secretKey
sign = hashlib.md5(sign.encode()).hexdigest()
myurl = myurl + '?appid=' + appid + '&q=' + urllib.parse.quote(q) + '&from=' + fromLang + '&to=' + toLang + '&salt=' + str(salt) + '&sign=' + sign +'&action=1'
url_json = 'https://bugs.mojang.com/rest/api/2/issue/'+str.upper(pagename)
json_text = requests.get(url_json,timeout=10)
file = json.loads(json_text.text)
Versions = file['fields']['versions']
name = []
for item in Versions[:]:
name.append(item['name'])
httpClient = http.client.HTTPConnection('api.fanyi.baidu.com')
httpClient.request('GET', myurl)
response = httpClient.getresponse()
result_all = response.read().decode("utf-8")
result = json.loads(result_all)
for item in result['trans_result']:
dst=item['dst']
if name[0] == name[-1]:
Version = "Version: "+name[0]
else:
Version = "Versions: "+name[0]+"~"+name[-1]
try:
Priority = "Mojang Priority: "+file['fields']['customfield_12200']['value']
return(Title+'\n'+dst+'\n'+Type+'\n'+Project+'\n'+TStatus+'\n'+Priority+'\n'+Resolution+'\n'+Version+'\n'+Link+'\n'+'由百度翻译提供支持。')
except Exception:
return(Title+'\n'+dst+'\n'+Type+'\n'+Project+'\n'+TStatus+'\n'+Resolution+'\n'+Version+'\n'+Link+'\n'+'由百度翻译提供支持。')
except Exception:
try:
return(Title+'\n'+dst+'\n'+Type+'\n'+Project+'\n'+TStatus+'\n'+Priority+'\n'+Resolution+'\n'+Link+'\n'+'由百度翻译提供支持。')
except Exception:
try:
return(Title+'\n'+dst+'\n'+Type+'\n'+Project+'\n'+TStatus+'\n'+Resolution+'\n'+Link+'\n'+'由百度翻译提供支持。')
except Exception:
try:
return(Link)
except Exception as e:
return("发生错误:"+str(e)+".")
except Exception as e:
return("发生错误:"+str(e)+".")
except Exception as e:
return("发生错误:"+str(e)+".")

View file

@@ -1,59 +0,0 @@
# -*- coding:utf-8 -*-
import requests
from xml.etree import ElementTree
import json
import string
import os, sys
from googletrans import Translator
tr = Translator(timeout=10)
async def buggc(pagename):
try:
try:
os.remove('bug_cache_text.txt')
except Exception:
pass
url_str ='https://bugs.mojang.com/si/jira.issueviews:issue-xml/'+ str.upper(pagename) + '/' + str.upper(pagename) + '.xml'
respose_str = requests.get(url_str,timeout=10)
respose_str = requests.get(url_str,timeout=10)
try:
respose_str.encoding = 'utf-8'
root = ElementTree.XML(respose_str.text)
for node in root.iter("channel"):
for node in root.iter("item"):
Title = node.find("title").text
Titleg = tr.translate(node.find("title").text,dest='zh-cn').text
Type = "类型:" + node.find("type").text
Project = "项目:" + node.find("project").text
TStatus = "进度:" + tr.translate(str(node.find("status").text),dest='zh-cn').text
Resolution = "状态:" + tr.translate(str(node.find("resolution").text),dest='zh-cn').text
Link = node.find("link").text
url_json = 'https://bugs.mojang.com/rest/api/2/issue/'+str.upper(pagename)
json_text = requests.get(url_json,timeout=10)
file = json.loads(json_text.text)
Versions = file['fields']['versions']
name = []
for item in Versions[:]:
name.append(item['name'])
if name[0] == name[-1]:
Version = "Version: "+name[0]
else:
Version = "Versions: "+name[0]+"~"+name[-1]
try:
Priority = "Mojang优先级"+tr.translate(file['fields']['customfield_12200']['value'],dest='zh-cn').text
return(Title+'\n'+Titleg+'\n'+Type+'\n'+Project+'\n'+TStatus+'\n'+Priority+'\n'+Resolution+'\n'+Version+'\n'+Link+'\n'+"由Google翻译提供支持。")
except Exception:
return(Title+'\n'+Titleg+'\n'+Type+'\n'+Project+'\n'+TStatus+'\n'+Resolution+'\n'+Version+'\n'+Link+'\n'+"由Google翻译提供支持。")
except Exception:
try:
return(Title+'\n'+Titleg+'\n'+Type+'\n'+Project+'\n'+TStatus+'\n'+Priority+'\n'+Resolution+'\n'+Link+'\n'+"由Google翻译提供支持。")
except Exception:
try:
return(Title+'\n'+Titleg+'\n'+Type+'\n'+Project+'\n'+TStatus+'\n'+Resolution+'\n'+Link+'\n'+"由Google翻译提供支持。")
except Exception:
try:
return(Link)
except Exception as e:
return("发生错误:"+str(e)+".")
except Exception as e:
return("发生错误:"+str(e)+".")

View file

@@ -1,77 +0,0 @@
# -*- coding:utf-8 -*-
import requests
from xml.etree import ElementTree
import json
import string
import os, sys
from googletrans import Translator
tr = Translator(timeout=10)
pagename = 'MC-4'
try:
try:
os.remove('bug_cache_text.txt')
except Exception:
pass
url_str ='https://bugs.mojang.com/si/jira.issueviews:issue-xml/'+ str.upper(pagename) + '/' + str.upper(pagename) + '.xml'
respose_str = requests.get(url_str,timeout=10)
try:
respose_str.encoding = 'utf-8'
root = ElementTree.XML(respose_str.text)
for node in root.iter("channel"):
for node in root.iter("item"):
Title = node.find("title").text
Titleg = tr.translate(node.find("title").text,dest='zh-cn').text
Type = "类型:" + node.find("type").text
Project = "项目:" + node.find("project").text
TStatus = "进度:" + tr.translate(str(node.find("status").text),dest='zh-cn').text
Resolution = "状态:" + tr.translate(str(node.find("resolution").text),dest='zh-cn').text
Link = node.find("link").text
url_json = 'https://bugs.mojang.com/rest/api/2/issue/'+str.upper(pagename)
json_text = requests.get(url_json,timeout=10)
file = json.loads(json_text.text)
Versions = file['fields']['versions']
for item in Versions[:]:
name = item['name']+"|"
y = open('bug_cache_text.txt',mode='a',encoding='utf-8')
y.write(name)
y.close()
z = open('bug_cache_text.txt',mode='r',encoding='utf-8')
j = z.read()
m = j.strip(string.punctuation)
if m.split('|')[0] == m.split('|')[-1]:
Version = "版本:"+m.split('|')[0]
else:
Version = "版本:"+m.split('|')[0]+"~"+m.split('|')[-1]
try:
Priority = "Mojang优先级"+tr.translate(file['fields']['customfield_12200']['value'],dest='zh-cn').text
print(Title+'\n'+Titleg+'\n'+Type+'\n'+Project+'\n'+TStatus+'\n'+Priority+'\n'+Resolution+'\n'+Version+'\n'+Link+'\n'+"由Google翻译提供支持。")
z.close()
os.remove('bug_cache_text.txt')
except Exception:
print(Title+'\n'+Titleg+'\n'+Type+'\n'+Project+'\n'+TStatus+'\n'+Resolution+'\n'+Version+'\n'+Link+'\n'+"由Google翻译提供支持。")
except Exception:
try:
respose_str.encoding = 'utf-8'
root = ElementTree.XML(respose_str.text)
for node in root.iter("channel"):
for node in root.iter("item"):
Title = node.find("title").text
Titleg = tr.translate(node.find("title").text,dest='zh-cn').text
Type = "类型:" + node.find("type").text
TStatus = "进度:" + tr.translate(node.find("status").text,dest='zh-cn').text
Resolution = "状态:" + tr.translate(node.find("resolution"),dest='zh-cn').text
Priority = "优先级:" + tr.translate(node.find("priority"),dest='zh-cn').text
Link = node.find("link").text
print(Title+'\n'+Titleg+'\n'+Type+'\n'+TStatus+'\n'+Priority+'\n'+Resolution+'\n'+Link+'\n'+"由Google翻译提供支持。")
except Exception:
try:
respose_str.encoding = 'utf-8'
root = ElementTree.XML(respose_str.text)
for node in root.iter("channel"):
for node in root.iter("item"):
Link = node.find("link").text
print(Link)
except Exception as e:
print("发生错误:"+str(e)+".")
except Exception as e:
print("发生错误:"+str(e)+".")

View file

@@ -1,7 +1,7 @@
path = '~'
async def help():
return(f'''{path}ab - 查看Minecraft Wiki过滤器日志。
{path}bug -h
{path}bug <JiraID> - 从Mojira中获取此Bug的信息
{path}mcv - 获取当前Minecraft Java版最新版本
{path}mcbv - 获取当前Minecraft基岩版最新版本
{path}mcdv - 获取当前Minecraft Dungeons最新版本

View file

@@ -4,7 +4,7 @@ import aiohttp
async def get_data(url: str, fmt: str):
async with aiohttp.ClientSession() as session:
async with session.get(url) as req:
async with session.get(url,timeout=aiohttp.ClientTimeout(total=20)) as req:
if hasattr(req, fmt):
return await getattr(req, fmt)()
else:

View file

@@ -6,7 +6,7 @@ import re
async def new():
url = 'https://minecraft-zh.gamepedia.com/api.php?action=query&list=logevents&letype=newusers&format=json'
async with aiohttp.ClientSession() as session:
async with session.get(url) as req:
async with session.get(url,timeout=aiohttp.ClientTimeout(total=20)) as req:
if req.status != 200:
return f"请求发生时错误:{req.status}"
else:

View file

@@ -7,7 +7,7 @@ from UTC8 import UTC8
async def rc():
url = 'https://minecraft-zh.gamepedia.com/api.php?action=query&list=recentchanges&rcprop=title|user|timestamp&rctype=edit|new&format=json'
async with aiohttp.ClientSession() as session:
async with session.get(url) as req:
async with session.get(url,timeout=aiohttp.ClientTimeout(total=20)) as req:
if req.status != 200:
return f"请求发生时错误:{req.status}"
else:

View file

@@ -1,5 +1,5 @@
import re
import requests
import aiohttp
import json
from .be import main
async def server(address):
@@ -18,8 +18,13 @@ async def server(address):
try:
url = 'http://motd.wd-api.com/?ip='+serip+'&port='+port1+'&mode=info'
motd = requests.get(url,timeout=5)
file = json.loads(motd.text)
async with aiohttp.ClientSession() as session:
async with session.get(url,timeout=aiohttp.ClientTimeout(total=20)) as req:
if req.status != 200:
print(f"请求发生时错误:{req.status}")
else:
motd = await req.text()
file = json.loads(motd)
try:
if file['code'] == 200:
x=re.sub(r'§\w',"",file['data']['description']['text'])

View file

@@ -1,5 +1,5 @@
import re
import requests
import aiohttp
import json
from .be import main
async def serverraw(address):
@@ -18,8 +18,13 @@ async def serverraw(address):
try:
url = 'http://motd.wd-api.com/?ip='+serip+'&port='+port1+'&mode=info'
motd = requests.get(url,timeout=5)
file = json.loads(motd.text)
async with aiohttp.ClientSession() as session:
async with session.get(url,timeout=aiohttp.ClientTimeout(total=20)) as req:
if req.status != 200:
print(f"请求发生时错误:{req.status}")
else:
motd = await req.text()
file = json.loads(motd)
try:
if file['code'] == 200:
x = file['data']['description']['text']

View file

@@ -14,7 +14,7 @@ async def Username(name):
w = q.group(1)
if w in iwlist():
url = iwlink(w)
return (rUser1(url, q.group(2)))
return (await rUser1(url, q.group(2)))
else:
return('未知语言,请使用~user -h查看帮助。')
except:
@@ -22,7 +22,7 @@ async def Username(name):
try:
s = re.match(r'~(.*?) (.*)', q.group(1))
metaurl = 'https://' + s.group(1) + '.gamepedia.com/'
return (rUser1(metaurl, s.group(2)))
return (await rUser1(metaurl, s.group(2)))
except:
try:
i = re.match(r'(.*?):(.*)',q.group(1))
@@ -31,7 +31,7 @@ async def Username(name):
if w in iwlist():
try:
metaurl = 'https://minecraft-' + w + '.gamepedia.com/'
return (rUser1(metaurl, x))
return (await rUser1(metaurl, x))
except Exception as e:
return ('发生错误:' + str(e))
else:
@@ -42,14 +42,14 @@ async def Username(name):
return ('发生错误:' + str(e))
except Exception:
metaurl = 'https://minecraft.gamepedia.com/'
return (rUser1(metaurl, q.group(1)))
return (await rUser1(metaurl, q.group(1)))
else:
try:
q = re.match(r'^user-(.*?) (.*)', name)
w = q.group(1)
if w in iwlist():
url = iwlink(w)
return (User1(url, q.group(2)))
return (await User1(url, q.group(2)))
else:
return('未知语言,请使用~user -h查看帮助。')
except:
@@ -57,7 +57,7 @@ async def Username(name):
try:
s = re.match(r'~(.*?) (.*)', q.group(1))
metaurl = 'https://' + s.group(1) + '.gamepedia.com/'
return (User1(metaurl, s.group(2)))
return (await User1(metaurl, s.group(2)))
except:
try:
i = re.match(r'(.*?):(.*)',q.group(1))
@@ -66,16 +66,16 @@ async def Username(name):
if w in iwlist():
try:
metaurl = iwlink(w)
return (User1(metaurl, x))
return (await User1(metaurl, x))
except Exception as e:
return ('发生错误:' + str(e))
else:
try:
metaurl = 'https://minecraft.gamepedia.com/'
return (User1(metaurl, x))
return (await User1(metaurl, x))
except Exception as e:
return ('发生错误:' + str(e))
except Exception:
metaurl = 'https://minecraft.gamepedia.com/'
return (User1(metaurl, q.group(1)))
return (await User1(metaurl, q.group(1)))

View file

@@ -1,22 +1,29 @@
import json
import re
import requests
import aiohttp
from UTC8 import UTC8
from .yhz import yhz
from .gender import gender
import re
import urllib
from bs4 import BeautifulSoup as bs
def rUser1(url, str3):
async def get_data(url: str, fmt: str):
async with aiohttp.ClientSession() as session:
async with session.get(url,timeout=aiohttp.ClientTimeout(total=20)) as req:
if hasattr(req, fmt):
return await getattr(req, fmt)()
else:
raise ValueError(f"NoSuchMethod: {fmt}")
async def rUser1(url, str3):
q = str3
url1 = url+'api.php?action=query&list=users&ususers=' + q + '&usprop=groups%7Cblockinfo%7Cregistration%7Ceditcount%7Cgender&format=json'
url2 = url+'api.php?action=query&meta=allmessages&ammessages=mainpage&format=json'
s = requests.get(url1, timeout=10)
file = json.loads(s.text)
c = requests.get(url2, timeout=10)
file2 = json.loads(c.text)
file = await get_data(url1,'json')
file2 = await get_data(url2,'json')
url3 = url + 'UserProfile:' + q
res = requests.get(url3, timeout=10)
res = await get_data(url3,'text')
try:
Wikiname = file2['query']['allmessages'][0]['*']
except Exception:
@@ -30,7 +37,7 @@ def rUser1(url, str3):
Blockedtimestamp = UTC8(file['query']['users'][0]['blockedtimestamp'],'full')
Blockexpiry = UTC8(str(file['query']['users'][0]['blockexpiry']),'full')
Blockreason = str(file['query']['users'][0]['blockreason'])
soup = bs(res.text, 'html.parser')
soup = bs(res, 'html.parser')
stats = soup.find('div', class_='section stats')
point = soup.find('div', class_='score').text
dd = stats.find_all('dd')
@@ -48,7 +55,7 @@ def rUser1(url, str3):
Group = '用户组:' + yhz(str(file['query']['users'][0]['groups']))
Gender = '性别:' + gender(file['query']['users'][0]['gender'])
Registration = '注册时间:' + UTC8(file['query']['users'][0]['registration'],'full')
soup = bs(res.text, 'html.parser')
soup = bs(res, 'html.parser')
stats = soup.find('div', class_='section stats')
point = soup.find('div', class_='score').text
dd = stats.find_all('dd')

View file

@@ -1,19 +1,26 @@
import json
import re
import requests
import aiohttp
from UTC8 import UTC8
from .yhz import yhz
from .gender import gender
import re
import urllib
def User1(url, str3):
async def get_data(url: str, fmt: str):
async with aiohttp.ClientSession() as session:
async with session.get(url,timeout=aiohttp.ClientTimeout(total=20)) as req:
if hasattr(req, fmt):
return await getattr(req, fmt)()
else:
raise ValueError(f"NoSuchMethod: {fmt}")
async def User1(url, str3):
q = str3
url1 = url+'api.php?action=query&list=users&ususers=' + q + '&usprop=groups%7Cblockinfo%7Cregistration%7Ceditcount%7Cgender&format=json'
url2 = url+'api.php?action=query&meta=allmessages&ammessages=mainpage&format=json'
s = requests.get(url1, timeout=10)
file = json.loads(s.text)
c = requests.get(url2, timeout=10)
file2 = json.loads(c.text)
file = await get_data(url1,'json')
file2 = await get_data(url2,'json')
try:
Wikiname = file2['query']['allmessages'][0]['*']
except Exception:

View file

@@ -1,6 +1,6 @@
from wiki import im
from .puserlib import PUser1, PUser1ban, PUser1bann
import requests
import aiohttp
import json
import re
from .gender import gender
@@ -13,8 +13,13 @@ async def Userp(path,Username):
q = re.sub('_', ' ', q)
metaurl = 'https://' + path + '.gamepedia.com'
url1 = metaurl + '/api.php?action=query&list=users&ususers=' + q + '&usprop=groups%7Cblockinfo%7Cregistration%7Ceditcount%7Cgender&format=json'
s = requests.get(url1, timeout=10)
file = json.loads(s.text)
async with aiohttp.ClientSession() as session:
async with session.get(url1,timeout=aiohttp.ClientTimeout(total=20)) as req:
if req.status != 200:
print(f"请求发生时错误:{req.status}")
else:
s = await req.text()
file = json.loads(s)
try:
User = file['query']['users'][0]['name']
Gender = gender(file['query']['users'][0]['gender'])
@@ -24,9 +29,9 @@ async def Userp(path,Username):
Blockexpiry = UTC8(str(file['query']['users'][0]['blockexpiry']),'full')
Blockreason = str(file['query']['users'][0]['blockreason'])
if not Blockreason:
PUser1bann(metaurl, q, path, User, Gender, Registration, Blockedby, Blockedtimestamp, Blockexpiry)
await PUser1bann(metaurl, q, path, User, Gender, Registration, Blockedby, Blockedtimestamp, Blockexpiry)
else:
PUser1ban(metaurl, q, path, User, Gender, Registration, Blockedby, Blockedtimestamp, Blockexpiry,\
await PUser1ban(metaurl, q, path, User, Gender, Registration, Blockedby, Blockedtimestamp, Blockexpiry,\
Blockreason)
h = '/Userprofile:' +User
return(metaurl+urllib.parse.quote(h.encode('UTF-8')))
@@ -35,7 +40,7 @@ async def Userp(path,Username):
User = file['query']['users'][0]['name']
Gender = gender(file['query']['users'][0]['gender'])
Registration = UTC8(file['query']['users'][0]['registration'],'notimezone')
PUser1(metaurl, q, path, User, Gender, Registration)
await PUser1(metaurl, q, path, User, Gender, Registration)
h = '/Userprofile:' +User
return(metaurl+urllib.parse.quote(h.encode('UTF-8')))
except Exception:

View file

@@ -1,6 +1,6 @@
import json
import re
import requests
import aiohttp
import re
from bs4 import BeautifulSoup as bs
import os
@@ -9,11 +9,19 @@ from .hh import hh
from .hh17 import hh17
from os.path import abspath
from .tpg import tpg
def PUser1(url, str3,ss,User,Gender,Registration):
async def get_data(url: str, fmt: str):
async with aiohttp.ClientSession() as session:
async with session.get(url,timeout=aiohttp.ClientTimeout(total=20)) as req:
if hasattr(req, fmt):
return await getattr(req, fmt)()
else:
raise ValueError(f"NoSuchMethod: {fmt}")
async def PUser1(url, str3,ss,User,Gender,Registration):
q = str3
url2 = url+'/api.php?action=query&meta=allmessages&ammessages=mainpage&format=json'
c = requests.get(url2, timeout=10)
file2 = json.loads(c.text)
file2 = await get_data(url2,'json')
try:
Wikiname = file2['query']['allmessages'][0]['*']
except Exception:
@@ -26,8 +34,8 @@ def PUser1(url, str3,ss,User,Gender,Registration):
from .dpng import dpng
dpng(url,ss)
url2 = url+'/UserProfile:'+q
res = requests.get(url2)
soup = bs(res.text, 'html.parser')
res = await get_data(url2,'text')
soup = bs(res, 'html.parser')
stats = soup.find('div', class_='section stats')
point = soup.find('div', class_='score').text
dd = stats.find_all('dd')
@@ -39,11 +47,10 @@ def PUser1(url, str3,ss,User,Gender,Registration):
contributionwikis=ddk(str(dd[0])),createcount=ddk(str(dd[1])),\
editcount=ddk(str(dd[2])),deletecount=ddk(str(dd[3])),patrolcount=ddk(str(dd[4])),\
sitetop=ddk(str(dd[5])),globaltop=ddk(str(dd[6])),wikipoint=point)
def PUser1ban(url, str3,ss,User,Gender,Registration,Blockedby,Blockedtimestamp,Blockexpiry,Blockreason):
async def PUser1ban(url, str3,ss,User,Gender,Registration,Blockedby,Blockedtimestamp,Blockexpiry,Blockreason):
q = str3
url2 = url+'/api.php?action=query&meta=allmessages&ammessages=mainpage&format=json'
c = requests.get(url2, timeout=10)
file2 = json.loads(c.text)
file2 = await get_data(url2,'json')
try:
Wikiname = file2['query']['allmessages'][0]['*']
except Exception:
@@ -56,8 +63,8 @@ def PUser1ban(url, str3,ss,User,Gender,Registration,Blockedby,Blockedtimestamp,B
from .dpng import dpng
dpng(url,ss)
url2 = url+'/UserProfile:'+q
res = requests.get(url2)
soup = bs(res.text, 'html.parser')
res = await get_data(url2,'text')
soup = bs(res, 'html.parser')
stats = soup.find('div', class_='section stats')
point = soup.find('div', class_='score').text
dd = stats.find_all('dd')
@@ -70,11 +77,10 @@ def PUser1ban(url, str3,ss,User,Gender,Registration,Blockedby,Blockedtimestamp,B
patrolcount=ddk(str(dd[4])),sitetop=ddk(str(dd[5])),globaltop=ddk(str(dd[6])),\
wikipoint=point,blockbyuser=Blockedby,blocktimestamp1=Blockedtimestamp,blocktimestamp2=Blockexpiry,\
blockreason=hh17(Blockreason),bantype='Y')
def PUser1bann(url, str3,ss,User,Gender,Registration,Blockedby,Blockedtimestamp,Blockexpiry):
async def PUser1bann(url, str3,ss,User,Gender,Registration,Blockedby,Blockedtimestamp,Blockexpiry):
q = str3
url2 = url+'/api.php?action=query&meta=allmessages&ammessages=mainpage&format=json'
c = requests.get(url2, timeout=10)
file2 = json.loads(c.text)
file2 = await get_data(url2,'json')
try:
Wikiname = file2['query']['allmessages'][0]['*']
except Exception:
@@ -87,8 +93,8 @@ def PUser1bann(url, str3,ss,User,Gender,Registration,Blockedby,Blockedtimestamp,
from .dpng import dpng
dpng(url,ss)
url2 = url+'/UserProfile:'+q
res = requests.get(url2)
soup = bs(res.text, 'html.parser')
res = await get_data(url2,'text')
soup = bs(res, 'html.parser')
stats = soup.find('div', class_='section stats')
point = soup.find('div', class_='score').text
dd = stats.find_all('dd')

View file

@@ -1,15 +1,24 @@
import json
import re
import requests
import aiohttp
import urllib
import traceback
from interwikilist import iwlist,iwlink
async def get_data(url: str, fmt: str):
async with aiohttp.ClientSession() as session:
async with session.get(url,timeout=aiohttp.ClientTimeout(total=20)) as req:
if hasattr(req, fmt):
return await getattr(req, fmt)()
else:
raise ValueError(f"NoSuchMethod: {fmt}")
async def wiki1(path1,pagename):
print(pagename)
metaurl = path1 +'api.php?action=query&format=json&prop=info&inprop=url&redirects&titles=' + pagename
print(metaurl)
metatext = requests.get(metaurl, timeout=10)
file = json.loads(metatext.text)
file = await get_data(metaurl,"json")
try:
x = file['query']['pages']
y = sorted(x.keys())[0]
@@ -22,16 +31,14 @@ async def wiki1(path1,pagename):
try:
try:
searchurl = path1+'api.php?action=query&generator=search&gsrsearch=' + pagename + '&gsrsort=just_match&gsrenablerewrites&prop=info&gsrlimit=1&format=json'
f = requests.get(searchurl)
g = json.loads(f.text)
g = await get_data(searchurl,"json")
j = g['query']['pages']
b = sorted(j.keys())[0]
m = j[b]['title']
return ('找不到条目,您是否要找的是:' + m +'')
except Exception:
searchurl = path1+'api.php?action=query&list=search&srsearch='+pagename+'&srwhat=text&srlimit=1&srenablerewrites=&format=json'
f = requests.get(searchurl)
g = json.loads(f.text)
g = await get_data(searchurl,"json")
m = g['query']['search'][0]['title']
return ('找不到条目,您是否要找的是:' + m +'')
except Exception:
@@ -46,8 +53,7 @@ async def wiki1(path1,pagename):
h = re.match(r'https?://.*/(.*)', z, re.M | re.I)
try:
texturl = metaurl + '/api.php?action=query&prop=extracts&exsentences=1&&explaintext&exsectionformat=wiki&format=json&titles=' + h.group(1)
gettext = requests.get(texturl, timeout=10)
loadtext = json.loads(gettext.text)
loadtext = await get_data(texturl,"json")
v = loadtext['query']['pages'][y]['extract']
except Exception:
v = ''

View file

@@ -1,16 +1,24 @@
import requests
import aiohttp
import json
import re
import urllib
import traceback
async def get_data(url: str, fmt: str):
async with aiohttp.ClientSession() as session:
async with session.get(url,timeout=aiohttp.ClientTimeout(total=20)) as req:
if hasattr(req, fmt):
return await getattr(req, fmt)()
else:
raise ValueError(f"NoSuchMethod: {fmt}")
async def wi(c,w,pagename,itw = 'f',ignoremessage = 'f',template = 'f'):
str1 = pagename
metaurl = c+'api.php?action=query&format=json&prop=info&inprop=url&redirects&titles='
url1 = c
try:
url = metaurl+pagename
metatext = requests.get(url,timeout=5)
file = json.loads(metatext.text)
file = await get_data(url,"json")
try:
try:
x = file['query']['pages']
@@ -34,8 +42,7 @@ async def wi(c,w,pagename,itw = 'f',ignoremessage = 'f',template = 'f'):
try:
try:
searchurl = url1+'api.php?action=query&generator=search&gsrsearch=' + pagename + '&gsrsort=just_match&gsrenablerewrites&prop=info&gsrlimit=1&format=json'
f = requests.get(searchurl)
g = json.loads(f.text)
g = await get_data(searchurl,"json")
j = g['query']['pages']
b = sorted(j.keys())[0]
m = j[b]['title']
@@ -45,8 +52,7 @@ async def wi(c,w,pagename,itw = 'f',ignoremessage = 'f',template = 'f'):
return ('提示:您要找的'+ pagename + '不存在,要找的页面是' + m + '吗?')
except Exception:
searchurl = url1+'api.php?action=query&list=search&srsearch=' + pagename + '&srwhat=text&srlimit=1&srenablerewrites=&format=json'
f = requests.get(searchurl)
g = json.loads(f.text)
g = await get_data(searchurl,"json")
m = g['query']['search'][0]['title']
if itw == 't':
m = w+':'+m
@@ -74,8 +80,7 @@ async def wi(c,w,pagename,itw = 'f',ignoremessage = 'f',template = 'f'):
else:
h = re.match(r'https?://(.*?)/(.*)', z, re.M | re.I)
texturl = 'https://'+h.group(1)+'/api.php?action=query&prop=extracts&exsentences=1&&explaintext&exsectionformat=wiki&format=json&titles='+h.group(2)
textt = requests.get(texturl,timeout=5)
e = json.loads(textt.text)
e = await get_data(texturl,"json")
r = e['query']['pages'][y]['extract']
except:
r = ''