This commit is contained in:
yzhh 2021-02-09 23:10:51 +08:00
parent 960e0b80cd
commit 4ce31e6cb6
2 changed files with 31 additions and 16 deletions

View file

@@ -1,4 +1,5 @@
import re
import traceback
import urllib
import aiohttp
@@ -67,6 +68,7 @@ async def GetUser(wikiurl, username, argv=None):
GetUserJson = await get_data(UserJsonURL, 'json')
Wikiname = await getwikiname(wikiurl)
GetUserGroupsList = await get_user_group(wikiurl)
GetArticleUrl = await wikilib().get_article_path(wikiurl)
try:
User = GetUserJson['query']['users'][0]['name']
Editcount = str(GetUserJson['query']['users'][0]['editcount'])
@@ -92,14 +94,18 @@ async def GetUser(wikiurl, username, argv=None):
clawerurl = wikiurl + 'UserProfile:' + username
clawer = await get_data(clawerurl, 'text')
soup = bs(clawer, 'html.parser')
stats = soup.find('div', class_='section stats')
point = soup.find('div', class_='score').text
dd = stats.find_all('dd')
Editcount = ('\n编辑过的Wiki' + str(dd[0]) + '\n创建数:' + str(dd[1]) + ' | 编辑数:' + str(dd[2]) + '\n删除数:' + str(
dd[3]) + ' | 巡查数:' + str(dd[4]) + '\n本站排名:' + str(dd[5]) + ' | 全域排名:' + str(dd[6]) + '\n好友:' + str(
dd[7]))
Editcount = re.sub(r'<dd>|</dd>', '', Editcount)
Editcount += f' | Wikipoints{point}'
try:
stats = soup.find('div', class_='section stats')
point = soup.find('div', class_='score').text
dd = stats.find_all('dd')
Editcount = ('\n编辑过的Wiki' + str(dd[0]) + '\n创建数:' + str(dd[1]) + ' | 编辑数:' + str(dd[2]) + '\n删除数:' + str(
dd[3]) + ' | 巡查数:' + str(dd[4]) + '\n本站排名:' + str(dd[5]) + ' | 全域排名:' + str(dd[6]) + '\n好友:' + str(
dd[7]))
Editcount = re.sub(r'<dd>|</dd>', '', Editcount)
Editcount += f' | Wikipoints{point}'
except:
Editcount = '无法获取到增强型用户页中的编辑信息。'
dd = ['?', '?', '?', '?', '?', '?', '?']
if argv == '-p':
import uuid
import os
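
The hunk above wraps the SocialProfile scraping in try/except, so a wiki whose UserProfile page lacks the expected stats markup falls back to placeholder values instead of raising. A minimal standalone sketch of that pattern; the function name, session handling and fallback values are illustrative and not part of the commit:

import aiohttp
from bs4 import BeautifulSoup

async def fetch_userprofile_stats(wikiurl, username):
    # Fetch the UserProfile: page and pull the SocialProfile stats out of it.
    async with aiohttp.ClientSession() as session:
        async with session.get(wikiurl + 'UserProfile:' + username) as resp:
            html = await resp.text()
    try:
        soup = BeautifulSoup(html, 'html.parser')
        stats = soup.find('div', class_='section stats')
        point = soup.find('div', class_='score').text
        dd = [d.text for d in stats.find_all('dd')]
    except Exception:
        # Page layout not recognised: degrade gracefully, as the commit does.
        point = '?'
        dd = ['?'] * 8
    return point, dd
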
@@ -182,8 +188,8 @@ async def GetUser(wikiurl, username, argv=None):
globaltop=d(str(dd[6])),
wikipoint=point)
if argv == '-p':
return f'{wikiurl}UserProfile:{urllib.parse.quote(rmuser.encode("UTF-8"))}[[uimgc:{imagepath}]]'
return (wikiurl + 'UserProfile:' + urllib.parse.quote(rmuser.encode('UTF-8')) + '\n' +
return f'{GetArticleUrl}User:{urllib.parse.quote(rmuser.encode("UTF-8"))}[[uimgc:{imagepath}]]'
return (GetArticleUrl+ 'User:' + urllib.parse.quote(rmuser.encode('UTF-8')) + '\n' +
Wikiname + '\n' +
f'用户:{User} | 编辑数:{Editcount}\n' +
f'用户组:{Group}\n' +
@@ -193,6 +199,5 @@ async def GetUser(wikiurl, username, argv=None):
if 'missing' in GetUserJson['query']['users'][0]:
return '没有找到此用户。'
else:
return '发生错误:' + e
import traceback
traceback.print_exc()
return '发生错误:' + e

View file

@@ -56,6 +56,18 @@ class wikilib:
interwiki_dict[interwiki['prefix']] = re.sub(r'(?:wiki/|)\$1', '', interwiki['url'])
return interwiki_dict
async def get_siteinfo(self, url):
siteinfo_url = url + '?action=query&meta=siteinfo&siprop=general&format=json'
j = await self.get_data(siteinfo_url, 'json')
return j
async def get_article_path(self, url):
siteinfo = await self.get_siteinfo(url)
article_path = siteinfo['query']['general']['articlepath']
article_path = re.sub(r'\$1', '', article_path)
baseurl = re.match(r'https?://(.*?)/.*', url)
return baseurl.group(1) + article_path
async def get_image(self, pagename):
try:
url = self.wikilink + f'?action=query&titles={pagename}&prop=imageinfo&iiprop=url&format=json'
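
The get_siteinfo and get_article_path helpers added above query MediaWiki's siteinfo API and turn its articlepath value (typically '/wiki/$1') into a prefix for building page links. A self-contained sketch of the same idea, assuming api_url points at a MediaWiki api.php endpoint; like the commit, it returns the host without the scheme:

import re
import aiohttp

async def article_path(api_url):
    params = {'action': 'query', 'meta': 'siteinfo',
              'siprop': 'general', 'format': 'json'}
    async with aiohttp.ClientSession() as session:
        async with session.get(api_url, params=params) as resp:
            siteinfo = await resp.json()
    # articlepath is a template such as '/wiki/$1'; drop the placeholder
    # and prepend the host taken from the API URL, as the commit does.
    path = re.sub(r'\$1', '', siteinfo['query']['general']['articlepath'])
    host = re.match(r'https?://(.*?)/.*', api_url).group(1)
    return host + path

For example, given https://example.org/w/api.php and the default '/wiki/$1' articlepath, this would return 'example.org/wiki/'.
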
@@ -123,8 +135,7 @@ class wikilib:
if 'missing' in self.psepgraw:
self.rspt = await self.researchpage()
return self.rspt
self.orginwikilink = re.sub('api.php', '', self.orginwikilink)
msg = self.orginwikilink + urllib.parse.quote(self.pagename.encode('UTF-8'))
msg = await self.get_article_path(self.wikilink) + urllib.parse.quote(self.pagename.encode('UTF-8'))
return {'status': 'done', 'text': msg}
async def getdesc(self):
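
The change above (and the matching one in the first file) stops deriving page links by stripping 'api.php' from the API URL and instead uses the articlepath reported by siteinfo, so links point at real article URLs rather than the script directory. A small sketch of the difference, using a hypothetical wiki at https://example.org/w/api.php with the default '/wiki/$1' articlepath:

import urllib.parse

api_url = 'https://example.org/w/api.php'
pagename = 'Main Page'
quoted = urllib.parse.quote(pagename.encode('UTF-8'))

# Old behaviour: strip 'api.php' and append the title -> script directory link.
old_link = api_url.replace('api.php', '') + quoted
# 'https://example.org/w/Main%20Page'

# New behaviour: host + articlepath from siteinfo (scheme omitted, as in the commit).
new_link = 'example.org/wiki/' + quoted
# 'example.org/wiki/Main%20Page'
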
@@ -221,7 +232,7 @@ class wikilib:
print(pagename)
print(interwiki)
if pagename == '':
return {'status': 'done', 'text': '错误:需要查询的页面为空。'}
return {'status': 'done', 'text': await self.get_article_path(wikilink)}
pagename = re.sub('_', ' ', pagename)
pagename = pagename.split('|')[0]
self.orginwikilink = wikilink
@@ -243,7 +254,6 @@ class wikilib:
matchinterwiki = re.match(r'(.*?):(.*)', self.pagename)
if matchinterwiki:
iwlist = await self.get_interwiki(self.wikilink)
print(iwlist)
if matchinterwiki.group(1) in iwlist:
if tryiw <= 5:
interwiki_link = iwlist[matchinterwiki.group(1)]
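
The last hunk resolves 'prefix:title' queries against the interwiki map built by get_interwiki earlier in the file and re-dispatches the query, with the hop count capped at 5 (tryiw <= 5) so chained prefixes cannot recurse forever. A reduced sketch of that lookup; the map contents and function name are illustrative:

import re

def resolve_interwiki(pagename, interwiki_map, hops=0, max_hops=5):
    # Return the target URL for a 'prefix:title' query, or None if the prefix
    # is unknown or the hop limit has been reached.
    match = re.match(r'(.*?):(.*)', pagename)
    if match and match.group(1) in interwiki_map and hops <= max_hops:
        return interwiki_map[match.group(1)] + match.group(2)
    return None

iw = {'en': 'https://en.example.org/wiki/', 'zh': 'https://zh.example.org/wiki/'}
print(resolve_interwiki('en:Sandbox', iw))  # https://en.example.org/wiki/Sandbox
print(resolve_interwiki('xx:Sandbox', iw))  # None
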