# akari-bot/modules/wiki/wikilib.py
import datetime
import json
import re
import traceback
import urllib.parse

import aiohttp

from core import dirty_check
from .helper import check_wiki_available
from .database import WikiDB


class wikilib:
    async def get_data(self, url: str, fmt: str, headers=None):
        # Fetch a URL and return it parsed with the given aiohttp response
        # method ('json', 'text', ...); returns False on any failure.
        async with aiohttp.ClientSession(headers=headers) as session:
            try:
                async with session.get(url, timeout=aiohttp.ClientTimeout(total=20)) as req:
                    if hasattr(req, fmt):
                        return await getattr(req, fmt)()
                    else:
                        raise ValueError(f"NoSuchMethod: {fmt}")
            except Exception:
                traceback.print_exc()
                return False
    def danger_wiki_check(self):
        link = self.wikilink.upper()
        for keyword in ('WIKIPEDIA', 'UNCYCLOPEDIA', 'HMOEGIRL', 'EVCHK',
                        'HONGKONG.FANDOM', 'WIKILEAKS', 'NANFANGGONGYUAN'):
            if keyword in link:
                return True
        return False
    async def danger_text_check(self, text):
        if not self.danger_wiki_check():
            return False
        check = await dirty_check.check(text)
        print(check)
        if check.find('<吃掉了>') != -1 or check.find('<全部吃掉了>') != -1:
            return True
        return False
    async def random_page(self, url, iw=None, headers=None):
        random_url = url + '?action=query&list=random&format=json'
        j = await self.get_data(random_url, 'json')
        randompage = j['query']['random'][0]['title']
        return await self.main(url, randompage, interwiki=iw, headers=headers)
    async def get_wiki_info(self, url=None):
        url = url if url is not None else self.wikilink
        getcacheinfo = WikiDB.get_wikiinfo(url)
        if getcacheinfo and ((datetime.datetime.strptime(getcacheinfo[1], "%Y-%m-%d %H:%M:%S") + datetime.timedelta(
                hours=8)).timestamp() - datetime.datetime.now().timestamp()) > -43200:
            return json.loads(getcacheinfo[0])
        wiki_info_url = url + '?action=query&meta=siteinfo&siprop=general|namespaces|namespacealiases|interwikimap|extensions&format=json'
        j = await self.get_data(wiki_info_url, 'json')
        WikiDB.update_wikiinfo(url, json.dumps(j))
        return j
    async def get_interwiki(self, url=None):
        if url is None:
            j = self.wiki_info
        else:
            j = await self.get_wiki_info(url)
        interwikimap = j['query']['interwikimap']
        interwiki_dict = {}
        for interwiki in interwikimap:
            interwiki_dict[interwiki['prefix']] = interwiki['url']
        return interwiki_dict
    async def get_namespace(self, url=None):
        if url is None:
            j = self.wiki_info
        else:
            j = await self.get_wiki_info(url)
        d = {}
        for x in j['query']['namespaces']:
            try:
                d[j['query']['namespaces'][x]['*']] = j['query']['namespaces'][x]['canonical']
            except KeyError:
                pass
            except Exception:
                traceback.print_exc()
        for x in j['query']['namespacealiases']:
            try:
                d[x['*']] = 'aliases'
            except KeyError:
                pass
            except Exception:
                traceback.print_exc()
        return d
    async def get_article_path(self, url=None):
        if url is None:
            wiki_info = self.wiki_info
            url = self.wikilink
        else:
            wiki_info = await self.get_wiki_info(url)
        if not wiki_info:
            return False
        article_path = wiki_info['query']['general']['articlepath']
        article_path = re.sub(r'\$1', '', article_path)
        baseurl = re.match(r'(https?://.*?)/.*', url)
        return baseurl.group(1) + article_path
    async def get_enabled_extensions(self, url=None):
        if url is None:
            wiki_info = self.wiki_info
        else:
            wiki_info = await self.get_wiki_info(url)
        extensions = wiki_info['query']['extensions']
        extlist = []
        for ext in extensions:
            extlist.append(ext['name'])
        return extlist
    async def get_image(self, pagename, wikilink=None):
        # Resolve the direct file URL of a File: page via the imageinfo API.
        try:
            url = (
                wikilink if wikilink is not None else self.wikilink) + f'?action=query&titles={pagename}&prop=imageinfo&iiprop=url&format=json'
            j = await self.get_data(url, 'json')
            parsepageid = self.parsepageid(j)
            imagelink = j['query']['pages'][parsepageid]['imageinfo'][0]['url']
            return imagelink
        except Exception:
            traceback.print_exc()
            return False
    async def getpage(self, pagename=None):
        pagename = pagename if pagename is not None else self.pagename
        pagename = re.sub(r'(.*)\?.*$', '\\1', pagename)
        getlinkurl = self.wikilink + '?action=query&format=json&prop=info&inprop=url&redirects&titles=' + pagename
        getpage = await self.get_data(getlinkurl, "json")
        return getpage
    def parsepageid(self, pageraw):
        # Pages are keyed by page id in the API result; take the first key.
        pageraw = pageraw['query']['pages']
        pagelist = iter(pageraw)
        pageid = next(pagelist)
        return pageid
    async def researchpage(self):
        try:
            try:
                searchurl = self.wikilink + '?action=query&generator=search&gsrsearch=' + self.pagename + '&gsrsort=just_match&gsrenablerewrites&prop=info&gsrlimit=1&format=json'
                getsecjson = await self.get_data(searchurl, "json", self.headers)
                secpageid = self.parsepageid(getsecjson)
                sectitle = getsecjson['query']['pages'][secpageid]['title']
            except Exception:
                traceback.print_exc()
                searchurl = self.wikilink + '?action=query&list=search&srsearch=' + self.pagename + '&srwhat=text&srlimit=1&srenablerewrites=&format=json'
                getsecjson = await self.get_data(searchurl, "json", self.headers)
                sectitle = getsecjson['query']['search'][0]['title']
            if self.interwiki == '':
                target = ''
            else:
                target = f'{self.interwiki}:'
            prompt = f'找不到{target}{self.pagename},您是否要找的是:[[{target}{sectitle}]]'
            titlesplit = self.pagename.split(':')
            if len(titlesplit) > 1:
                try:
                    get_namespace = await self.get_namespace()
                    if titlesplit[0] not in get_namespace:
                        prompt += f'\n提示:此Wiki上找不到“{titlesplit[0]}”名字空间,请检查是否设置了对应的Interwiki(使用~wiki iw list命令可以查询当前已设置的Interwiki)。'
                except Exception:
                    traceback.print_exc()
            if self.templateprompt:
                prompt = self.templateprompt + prompt
            if await self.danger_text_check(prompt):
                return {'status': 'done', 'text': 'https://wdf.ink/6OUp'}
            return {'status': 'wait', 'title': f'{target}{sectitle}', 'text': prompt}
        except Exception:
            traceback.print_exc()
            return {'status': 'done', 'text': '找不到条目。'}
    async def nullpage(self):
        if 'invalid' in self.psepgraw:
            rs1 = re.sub('The requested page title contains invalid characters:', '请求的页面标题包含非法字符:',
                         self.psepgraw['invalidreason'])
            rs = '发生错误:“' + rs1 + '”。'
            rs = re.sub('".”', '"', rs)
            return {'status': 'done', 'text': rs}
        if 'missing' in self.psepgraw:
            self.rspt = await self.researchpage()
            return self.rspt
        msg = await self.get_article_path(self.wikilink) + urllib.parse.quote(self.pagename.encode('UTF-8'))
        return {'status': 'done', 'text': msg}
    async def getdesc(self):
        try:
            descurl = self.wikilink + '?action=query&prop=info|pageprops|extracts&ppprop=description|displaytitle|disambiguation|infoboxes&explaintext=true&exsectionformat=plain&exchars=200&format=json&titles=' + self.querytextname
            loadtext = await self.get_data(descurl, "json", self.headers)
            pageid = self.parsepageid(loadtext)
            desc = loadtext['query']['pages'][pageid]['extract']
            desc = re.findall(r'(.*?(?:\!|\?|\.|\;|!|?|。|;)).*', desc, re.S | re.M)[0]
        except Exception:
            traceback.print_exc()
            desc = ''
        return desc
    async def getfirstline(self):
        try:
            descurl = self.wikilink + f'?action=parse&page={self.querytextname}&prop=wikitext&section=0&format=json'
            loaddesc = await self.get_data(descurl, 'json', self.headers)
            descraw = loaddesc['parse']['wikitext']['*']
            try:
                cutdesc = re.findall(r'(.*?(?:!|\?|\.|;|!|?|。|;)).*', descraw, re.S | re.M)
                desc = cutdesc[0]
            except IndexError:
                desc = descraw
        except Exception:
            traceback.print_exc()
            desc = ''
        return desc
    async def getalltext(self):
        try:
            descurl = self.wikilink + f'?action=parse&page={self.querytextname}&prop=wikitext&format=json'
            loaddesc = await self.get_data(descurl, 'json', self.headers)
            desc = loaddesc['parse']['wikitext']['*']
        except Exception:
            traceback.print_exc()
            desc = ''
        return desc
    async def step1(self):
        # First pass over the query result: resolve the page id, then fall back
        # to search or template rollback when the page does not exist.
        try:
            self.pageid = self.parsepageid(self.pageraw)
        except Exception:
            return {'status': 'done', 'text': '发生错误:无法获取到页面,请检查是否设置了对应Interwiki。'}
        self.psepgraw = self.pageraw['query']['pages'][self.pageid]
        if self.pageid == '-1':
            if self.template:
                self.pagename = self.orginpagename = re.sub(r'^Template:', '', self.pagename)
                self.template = False
                self.templateprompt = f'提示:[Template:{self.pagename}]不存在,已自动回滚搜索页面。\n'
                return await self.step1()
            return await self.nullpage()
        else:
            return await self.step2()
    async def step2(self):
        try:
            fullurl = self.psepgraw['fullurl']
            geturlpagename = fullurl.split(self.wiki_articlepath)[1]
            self.querytextname = urllib.parse.unquote(geturlpagename)
            querytextnamesplit = self.querytextname.split(':')
            if len(querytextnamesplit) > 1:
                namespaces = await self.get_namespace()
                if querytextnamesplit[0] in namespaces:
                    if namespaces[querytextnamesplit[0]] == 'Template':
                        getalltext = await self.getalltext()
                        try:
                            matchdoc = re.match(r'.*{{documentation\|?(.*?)}}.*', getalltext, re.I | re.S)
                            matchlink = re.match(r'link=(.*)', matchdoc.group(1), re.I | re.S)
                            if matchlink:
                                getdoc = matchlink.group(1)
                                getdocraw = await self.getpage(getdoc)
                                getdocid = self.parsepageid(getdocraw)
                                getdoclink = getdocraw['query']['pages'][getdocid]['fullurl']
                                getdocpagename = getdoclink.split(self.wiki_articlepath)[1]
                                self.querytextname = getdocpagename
                            else:
                                self.querytextname = geturlpagename + '/doc'
                        except AttributeError:
                            self.querytextname = geturlpagename + '/doc'
            if 'TextExtracts' in await self.get_enabled_extensions():
                desc = await self.getdesc()
            else:
                desc = ''
            if desc == '':
                desc = await self.getfirstline()
            print(desc)
            finpgname = geturlpagename
            try:
                section = re.match(r'.*(\#.*)', self.pagename)
                if section:
                    finpgname = geturlpagename + urllib.parse.quote(section.group(1).encode('UTF-8'))
                    fullurl = self.psepgraw['fullurl'] + urllib.parse.quote(section.group(1).encode('UTF-8'))
            except Exception:
                traceback.print_exc()
            try:
                pgtag = re.match(r'.*(\?.*)', self.pagename)
                if pgtag:
                    finpgname = geturlpagename + pgtag.group(1)
                    fullurl = fullurl + pgtag.group(1)
            except Exception:
                traceback.print_exc()
            finpgname = urllib.parse.unquote(finpgname)
            finpgname = re.sub('_', ' ', finpgname)
            if finpgname == self.orginpagename:
                rmlstlb = re.sub('\n$', '', desc)
            else:
                if self.interwiki == '':
                    target = ''
                else:
                    target = f'{self.interwiki}:'
                rmlstlb = re.sub('\n$', '',
                                 f'(重定向[{target}{self.orginpagename}] -> [{target}{finpgname}])' + (
                                     '\n' if desc != '' else '') + f'{desc}')
            rmlstlb = re.sub('\n\n', '\n', rmlstlb)
            if len(rmlstlb) > 250:
                rmlstlb = rmlstlb[0:250] + '...'
            try:
                rm5lline = re.findall(r'.*\n.*\n.*\n.*\n.*\n', rmlstlb)
                result = rm5lline[0] + '...'
            except Exception:
                result = rmlstlb
            msgs = {'status': 'done', 'url': fullurl, 'text': result, 'apilink': self.wikilink}
            matchimg = re.match(r'File:.*?\.(?:png|gif|jpg|jpeg|webp|bmp|ico)', self.pagename, re.I)
            if matchimg:
                getimg = await self.get_image(self.pagename)
                if getimg:
                    msgs['net_image'] = getimg
            matchaud = re.match(r'File:.*?\.(?:oga|ogg|flac|mp3|wav)', self.pagename, re.I)
            if matchaud:
                getaud = await self.get_image(self.pagename)
                if getaud:
                    msgs['net_audio'] = getaud
            if result != '' and await self.danger_text_check(result):
                return {'status': 'done', 'text': 'https://wdf.ink/6OUp'}
            return msgs
        except Exception as e:
            traceback.print_exc()
            return {'status': 'done', 'text': '发生错误:' + str(e)}
    async def main(self, wikilink, pagename, interwiki=None, template=False, headers=None, tryiw=0):
        # Entry point: look up `pagename` on the wiki whose api.php endpoint is
        # `wikilink` and return a dict carrying at least 'status' and 'text'.
        print(wikilink)
        print(pagename)
        print(interwiki)
        if pagename == '':
            article_path = await self.get_article_path(wikilink)
            if not article_path:
                article_path = '发生错误:此站点或许不是有效的MediaWiki网站。' + wikilink
            return {'status': 'done', 'text': article_path}
        pagename = re.sub('_', ' ', pagename)
        pagename = pagename.split('|')[0]
        self.wikilink = wikilink
        danger_check = self.danger_wiki_check()
        if danger_check:
            if await self.danger_text_check(pagename):
                return {'status': 'done', 'text': 'https://wdf.ink/6OUp'}
        self.orginpagename = pagename
        self.pagename = pagename
        if interwiki is None:
            self.interwiki = ''
        else:
            self.interwiki = interwiki
        self.wiki_info = await self.get_wiki_info()
        self.wiki_namespace = await self.get_namespace()
        self.wiki_articlepath = await self.get_article_path()
        self.template = template
        self.templateprompt = None
        self.headers = headers
        if self.template:
            if not re.match('^Template:', self.pagename, re.I):
                self.pagename = 'Template:' + self.pagename
        self.pageraw = await self.getpage()
        if not self.pageraw:
            return {'status': 'done', 'text': '发生错误:无法获取到页面。'}
        if 'interwiki' in self.pageraw['query']:
            iwp = self.pageraw['query']['interwiki'][0]
            matchinterwiki = re.match(r'^' + iwp['iw'] + r':(.*)', iwp['title'])
            if tryiw <= 5:
                iwlist = await self.get_interwiki(self.wikilink)
                interwiki_link = iwlist[iwp['iw']]
                check = await check_wiki_available(interwiki_link)
                if check:
                    return await self.main(check[0], matchinterwiki.group(1),
                                           ((interwiki + ':') if interwiki is not None else '') + iwp['iw'],
                                           self.template, headers, tryiw + 1)
                else:
                    return {'status': 'done',
                            'text': f'发生错误:指向的interwiki不是一个有效的MediaWiki。{interwiki_link}{matchinterwiki.group(1)}'}
            else:
                return {'status': 'warn', 'text': '警告:尝试重定向已超过5次,继续尝试将有可能导致你被机器人加入黑名单。'}
        if 'redirects' in self.pageraw['query']:
            self.pagename = self.pageraw['query']['redirects'][0]['to']
        try:
            return await self.step1()
        except Exception as e:
            traceback.print_exc()
            return {'status': 'done', 'text': '发生错误:' + str(e)}
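

# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal, hedged example of how this class is typically driven: pass the
# wiki's api.php endpoint and a page title to wikilib().main(), which returns
# a dict carrying at least 'status' and 'text'. The endpoint below is a
# placeholder, and the call also depends on the bot's database and dirty_check
# modules being importable, so the snippet is kept commented out:
#
#     import asyncio
#
#     async def demo():
#         result = await wikilib().main('https://example.org/w/api.php', 'Sandbox')
#         print(result['status'], result['text'])
#
#     asyncio.run(demo())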