switch to requests for HTTP queries

parent 259c222623
commit 5cb88f3cf8
@@ -12,6 +12,11 @@ Compatibility with existing phenny modules has been mostly retained, but they
 will need to be updated to run on Python3 if they do not already. All of the
 core modules have been ported.
 
+Requirements
+------------
+* Python 3.2+
+* [python-requests](http://docs.python-requests.org/en/latest/)
+
 Installation
 ------------
 1. Run `./phenny` - this creates a default config file

@@ -10,7 +10,8 @@ modified from Wikipedia module
 author: mutantmonkey <mutantmonkey@mutantmonkey.in>
 """
 
-import re, urllib.request, urllib.parse, urllib.error
+import re
+import web
 import wiki
 
 wikiapi = 'https://wiki.archlinux.org/api.php?action=query&list=search&srsearch={0}&limit=1&prop=snippet&format=json'
@@ -23,7 +24,7 @@ def awik(phenny, input):
     if not origterm:
         return phenny.say('Perhaps you meant ".awik dwm"?')
 
-    term = urllib.parse.unquote(origterm)
+    term = web.unquote(origterm)
     term = term[0].upper() + term[1:]
     term = term.replace(' ', '_')
 
@@ -31,7 +32,7 @@ def awik(phenny, input):
 
     try:
         result = w.search(term)
-    except IOError:
+    except web.ConnectionError:
         error = "Can't connect to wiki.archlinux.org ({0})".format(wikiuri.format(term))
         return phenny.say(error)

@@ -7,7 +7,14 @@ Licensed under the Eiffel Forum License 2.
 http://inamidst.com/phenny/
 """
 
-import re, math, time, urllib.request, urllib.parse, urllib.error, locale, socket, struct, datetime
+import re
+import math
+import time
+import locale
+import socket
+import struct
+import datetime
+import web
 from decimal import Decimal as dec
 from tools import deprecated
 
@@ -273,9 +280,7 @@ yi.priority = 'low'
 
 def tock(phenny, input):
     """Shows the time from the USNO's atomic clock."""
-    u = urllib.request.urlopen('http://tycho.usno.navy.mil/cgi-bin/timer.pl')
-    info = u.info()
-    u.close()
+    info = web.head('http://tycho.usno.navy.mil/cgi-bin/timer.pl')
     phenny.say('"' + info['Date'] + '" - tycho.usno.navy.mil')
 tock.commands = ['tock']
 tock.priority = 'high'

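The tock change works because requests exposes response headers as a case-insensitive mapping, so the existing info['Date'] lookup survives the switch from urllib's message object to the r.headers dict that web.head now returns. A minimal standalone sketch of that property (example.com is a stand-in host):

    import requests

    # requests returns headers in a case-insensitive dict,
    # so 'Date' and 'date' resolve to the same header value
    r = requests.head('http://example.com/')
    assert r.headers['Date'] == r.headers['date']
    print('"' + r.headers['Date'] + '" - example.com')
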
@@ -4,16 +4,16 @@ commit.py - what the commit
 author: mutantmonkey <mutantmonkey@mutantmonkey.in>
 """
 
-from urllib.error import HTTPError
 import web
 from tools import GrumbleError
 
 
 def commit(phenny, input):
     """.commit - Get a What the Commit commit message."""
 
     try:
         msg = web.get("http://whatthecommit.com/index.txt")
-    except (HTTPError, IOError, ValueError):
+    except:
         raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")
 
     phenny.reply(msg)

@@ -4,7 +4,6 @@ fcc.py - fcc callsign lookup
 author: mutantmonkey <mutantmonkey@mutantmonkey.in>
 """
 
-from urllib.error import HTTPError
 from tools import GrumbleError
 import web
 import json
@@ -20,7 +19,7 @@ def fcc(phenny, input):
     try:
         req = web.get("http://callook.info/{0}/json".format(web.quote(callsign)))
         data = json.loads(req)
-    except (HTTPError, IOError, ValueError):
+    except:
         raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")
 
     if len(data) <= 0 or data['status'] == 'INVALID':

@@ -4,7 +4,6 @@ foodforus.py - foodforus module
 author: mutantmonkey <mutantmonkey@mutantmonkey.in>
 """
 
-from urllib.error import HTTPError
 from tools import GrumbleError
 import hashlib
 import json
@@ -31,7 +30,7 @@ def food(phenny, input):
     try:
         req = web.get(API_URL + '/food/' + web.quote(key.strip()))
         data = json.loads(req)
-    except (HTTPError, IOError):
+    except:
         raise GrumbleError("Uh oh, I couldn't contact foodforus. HOW WILL WE "\
                 "EAT NOW‽")
 
@@ -66,8 +65,8 @@ def foodvote(phenny, input):
 
     try:
         req = web.post(API_URL + '/vote', postdata)
-        data = json.loads(req)
-    except (HTTPError, IOError):
+        data = json.loads(req.text)
+    except:
         raise GrumbleError("Uh oh, I couldn't contact foodforus. HOW WILL WE "\
                 "EAT NOW‽")
 
@@ -83,7 +82,7 @@ def pickfood(phenny, input):
     try:
         req = web.get(API_URL + '/food/' + web.quote(key.strip()))
         data = json.loads(req)
-    except (HTTPError, IOError):
+    except:
         raise GrumbleError("Uh oh, I couldn't contact foodforus. HOW WILL WE "\
                 "EAT NOW‽")
 

@@ -8,20 +8,19 @@ http://inamidst.com/phenny/
 """
 
 import re
-import urllib.request
+#import urllib.request
 import urllib.parse
-import urllib.error
-import http.client
-import http.cookiejar
+#import http.client
+#import http.cookiejar
 import time
 from html.entities import name2codepoint
 import web
 from tools import deprecated
 from modules.linx import get_title as linx_gettitle
 
-cj = http.cookiejar.LWPCookieJar()
-opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
-urllib.request.install_opener(opener)
+#cj = http.cookiejar.LWPCookieJar()
+#opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
+#urllib.request.install_opener(opener)
 
 
 def head(phenny, input):
@@ -47,11 +46,9 @@ def head(phenny, input):
     try:
         info = web.head(uri)
         info['status'] = '200'
-    except urllib.error.HTTPError as e:
+    except web.HTTPError as e:
         return phenny.say(str(e.code))
-    except http.client.InvalidURL:
-        return phenny.say("Not a valid URI, sorry.")
-    except IOError:
+    except web.ConnectionError:
         return phenny.say("Can't connect to %s" % uri)
 
     resptime = time.time() - start
@@ -159,7 +156,7 @@ def gettitle(phenny, uri):
         #bytes = u.read(262144)
         #u.close()
 
-    except IOError:
+    except web.ConnectionError:
         return
 
     m = r_title.search(bytes)

@@ -17,8 +17,8 @@ NS = '{urn:oasis:names:tc:DSML:2:0:core}'
 def search(query):
     query = web.quote(query)
     try:
-        req = web.get(SEARCH_URL.format(query))
-    except (HTTPError, IOError):
+        req = web.get(SEARCH_URL.format(query), verify=False)
+    except (web.ConnectionError, web.HTTPError):
         raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")
 
     xml = lxml.etree.fromstring(req.encode('utf-8'))

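The verify=False argument is passed straight through to requests and disables TLS certificate verification for this one call. The diff does not say why; a self-signed or otherwise untrusted certificate on the search endpoint is the usual reason, so treat that as an assumption. A standalone sketch with a hypothetical URL standing in for SEARCH_URL:

    import requests

    # verify=False skips certificate validation for this request only;
    # the URL here is a hypothetical stand-in for SEARCH_URL
    r = requests.get('https://directory.example.org/dsml?q=test', verify=False)
    print(r.status_code)
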
@@ -6,12 +6,9 @@ author: Casey Link <unnamedrambler@gmail.com>
 """
 
 import random
-import configparser, os
-import http.client
-from urllib.parse import quote as urlquote
-from urllib.request import urlopen
-from urllib.error import HTTPError
+import configparser
+import os
+import web
 from lxml import etree
 from datetime import datetime
 
@@ -85,19 +82,15 @@ def now_playing(phenny, input):
         user = arg
     user = user.strip()
     try:
-        req = urlopen("%smethod=user.getrecenttracks&user=%s" % (APIURL, urlquote(user)))
-    except HTTPError as e:
-        if e.code == 400:
+        req = web.get("%smethod=user.getrecenttracks&user=%s" % (APIURL, web.quote(user)))
+    except web.HTTPError as e:
+        if e.response.status_code == 400:
             phenny.say("%s doesn't exist on last.fm, perhaps they need to set user" % (user))
             return
         else:
             phenny.say("uhoh. try again later, mmkay?")
             return
-    except http.client.BadStatusLine:
-        phenny.say("uhoh. try again later, mmkay?")
-        return
-    doc = etree.parse(req)
-    root = doc.getroot()
+    root = etree.fromstring(req.encode('utf-8'))
     recenttracks = list(root)
     if len(recenttracks) == 0:
         phenny.say("%s hasn't played anything recently. this isn't you? try lastfm-set" % (user))
@@ -155,16 +148,15 @@ def tasteometer(phenny, input):
     if not user2:
         user2 = input.nick
     try:
-        req = urlopen("%smethod=tasteometer.compare&type1=user&type2=user&value1=%s&value2=%s" % (APIURL, urlquote(user1), urlquote(user2)))
-    except (HTTPError, http.client.BadStatusLine) as e:
-        if e.code == 400:
+        req = web.get("%smethod=tasteometer.compare&type1=user&type2=user&value1=%s&value2=%s" % (APIURL, web.quote(user1), web.quote(user2)))
+    except web.HTTPError as e:
+        if e.response.status_code == 400:
             phenny.say("uhoh, someone doesn't exist on last.fm, perhaps they need to set user")
             return
         else:
             phenny.say("uhoh. try again later, mmkay?")
             return
-    doc = etree.parse(req)
-    root = doc.getroot()
+    root = etree.fromstring(req.encode('utf-8'))
     score = root.xpath('comparison/result/score')
     if len(score) == 0:
         phenny.say("something isn't right. have those users scrobbled?")

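The change from e.code to e.response.status_code follows from how web.get now signals failures: requests' Response.raise_for_status() raises requests.exceptions.HTTPError with the failed Response attached, whereas urllib's HTTPError carried the status directly on e.code. A minimal sketch (httpbin.org is used here as a stand-in endpoint that returns HTTP 400):

    import requests

    try:
        r = requests.get('http://httpbin.org/status/400')
        r.raise_for_status()  # raises HTTPError for 4xx/5xx responses
    except requests.exceptions.HTTPError as e:
        # urllib exposed the status as e.code; requests hangs the
        # whole Response object off the exception instead
        print(e.response.status_code)  # 400
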
@@ -5,7 +5,6 @@ author: andreim <andreim@andreim.net>
 author: mutantmonkey <mutantmonkey@mutantmonkey.in>
 """
 
-from urllib.error import HTTPError
 from tools import GrumbleError
 import web
 import json
@@ -29,7 +28,7 @@ def linx(phenny, input, short=False):
 
     try:
         req = web.post("https://linx.li/vtluug", {'url': url, 'short': short, 'api_key': phenny.config.linx_api_key})
-    except (HTTPError, IOError):
+    except (web.HTTPError, web.ConnectionError):
         raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")
 
     data = json.loads(req)
@@ -71,7 +70,7 @@ def lines(phenny, input):
 
     try:
         req = web.post("https://linx.li/vtluuglines", {'nickname': nickname, 'date': date, 'sender': input.nick, 'channel': input.sender, 'api_key': phenny.config.linx_api_key})
-    except (HTTPError, IOError):
+    except (web.HTTPError, web.ConnectionError):
         raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")
 
     phenny.reply(req)
@@ -89,7 +88,7 @@ def posted(phenny, input):
 
     try:
         req = web.post("https://linx.li/vtluugposted", {'message': message, 'sender': input.nick, 'channel': input.sender, 'api_key': phenny.config.linx_api_key})
-    except (HTTPError, IOError):
+    except (web.HTTPError, web.ConnectionError):
         raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")
 
     phenny.reply(req)

@@ -5,7 +5,6 @@ author: Ramblurr <unnamedrambler@gmail.com>
 author: mutantmonkey <mutantmonkey@mutantmonkey.in>
 """
 
-from urllib.error import HTTPError
 from tools import GrumbleError
 import web
 import lxml.html
@@ -15,7 +14,7 @@ def fml(phenny, input):
     """.fml"""
     try:
         req = web.get("http://www.fmylife.com/random")
-    except (HTTPError, IOError):
+    except:
         raise GrumbleError("I tried to use .fml, but it was broken. FML")
 
     doc = lxml.html.fromstring(req)
@@ -28,7 +27,7 @@ def mlia(phenny, input):
     """.mlia - My life is average."""
     try:
         req = web.get("http://mylifeisaverage.com/")
-    except (HTTPError, IOError):
+    except:
         raise GrumbleError("I tried to use .mlia, but it wasn't loading. MLIA")
 
     doc = lxml.html.fromstring(req)
@@ -42,7 +41,7 @@ def mlib(phenny, input):
     """.mlib - My life is bro."""
     try:
         req = web.get("http://mylifeisbro.com/random")
-    except (HTTPError, IOError):
+    except:
         raise GrumbleError("MLIB is out getting a case of Natty. It's chill.")
 
     doc = lxml.html.fromstring(req)
@@ -55,7 +54,7 @@ def mlig(phenny, input):
     """.mlig - My life is ginger."""
     try:
         req = web.get("http://www.mylifeisginger.org/random")
-    except (HTTPError, IOError):
+    except:
         raise GrumbleError("Busy eating your soul. Be back soon.")
 
     doc = lxml.html.fromstring(req)
@@ -68,7 +67,7 @@ def mlih(phenny, input):
     """.mlih - My life is ho."""
     try:
         req = web.get("http://mylifeisho.com/random")
-    except (HTTPError, IOError):
+    except:
         raise GrumbleError("MLIH is giving some dome to some lax bros.")
 
     doc = lxml.html.fromstring(req)
@@ -81,7 +80,7 @@ def mlihp(phenny, input):
     """.mlihp - My life is Harry Potter."""
     try:
         req = web.get("http://www.mylifeishp.com/random")
-    except (HTTPError, IOError):
+    except:
         raise GrumbleError("This service is not available to Muggles.")
 
     doc = lxml.html.fromstring(req)
@@ -94,7 +93,7 @@ def mlit(phenny, input):
     """.mlit - My life is Twilight."""
     try:
         req = web.get("http://mylifeistwilight.com/random")
-    except (HTTPError, IOError):
+    except:
         raise GrumbleError("Error: Your life is too Twilight. Go outside.")
 
     doc = lxml.html.fromstring(req)

@@ -7,7 +7,7 @@ Licensed under the Eiffel Forum License 2.
 http://inamidst.com/phenny/
 """
 
-import re, urllib.request, urllib.parse, urllib.error
+import re
 import web
 
 definitions = 'https://github.com/nslater/oblique/wiki'
@@ -30,9 +30,9 @@ def mappings(uri):
 
 def service(phenny, input, command, args):
     t = o.services[command]
-    template = t.replace('${args}', urllib.parse.quote(args, ''))
-    template = template.replace('${nick}', urllib.parse.quote(input.nick, ''))
-    uri = template.replace('${sender}', urllib.parse.quote(input.sender, ''))
+    template = t.replace('${args}', web.quote(args, ''))
+    template = template.replace('${nick}', web.quote(input.nick, ''))
+    uri = template.replace('${sender}', web.quote(input.sender, ''))
 
     info = web.head(uri)
     if isinstance(info, list):
@@ -104,7 +104,7 @@ def snippet(phenny, input):
     if not o.services:
         refresh(phenny)
 
-    search = urllib.parse.quote(input.group(2))
+    search = web.quote(input.group(2))
     py = "BeautifulSoup.BeautifulSoup(re.sub('<.*?>|(?<= ) +', '', " + \
         "''.join(chr(ord(c)) for c in " + \
         "eval(urllib.urlopen('http://ajax.googleapis.com/ajax/serv" + \

@@ -4,8 +4,6 @@ rule34.py - rule 34 module
 author: mutantmonkey <mutantmonkey@mutantmonkey.in>
 """
 
-from urllib.parse import quote as urlquote
-from urllib.error import HTTPError
 from tools import GrumbleError
 import web
 import lxml.html
@@ -19,8 +17,8 @@ def rule34(phenny, input):
         return
 
     try:
-        req = web.get("http://rule34.xxx/index.php?page=post&s=list&tags={0}".format(urlquote(q)))
-    except (HTTPError, IOError):
+        req = web.get("http://rule34.xxx/index.php?page=post&s=list&tags={0}".format(web.quote(q)))
+    except:
         raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")
 
     doc = lxml.html.fromstring(req)

@@ -10,24 +10,13 @@ http://inamidst.com/phenny/
 import re
 import web
 
-class Grab(web.urllib.request.URLopener):
-    def __init__(self, *args):
-        self.version = 'Mozilla/5.0 (Phenny)'
-        web.urllib.request.URLopener.__init__(self, *args)
-        self.addheader('Referer', 'https://github.com/sbp/phenny')
-    def http_error_default(self, url, fp, errcode, errmsg, headers):
-        return web.urllib.addinfourl(fp, [headers, errcode], "http:" + url)
-
 def google_ajax(query):
     """Search using AjaxSearch, and return its JSON."""
     if isinstance(query, str):
         query = query.encode('utf-8')
     uri = 'http://ajax.googleapis.com/ajax/services/search/web'
     args = '?v=1.0&safe=off&q=' + web.quote(query)
-    handler = web.urllib.request._urlopener
-    web.urllib.request._urlopener = Grab()
-    bytes = web.get(uri + args)
-    web.urllib.request._urlopener = handler
+    bytes = web.get(uri + args, headers={'Referer': 'https://github.com/sbp/phenny'})
     return web.json(bytes)
 
 def google_search(query):

@@ -4,11 +4,11 @@ short.py - vtluug url shortner
 author: andreim <andreim@andreim.net>
 """
 
-from urllib.error import HTTPError
 from tools import GrumbleError
 import web
 import json
 
 
 def short(phenny, input):
     """.short <url> - Shorten a URL."""
 
@@ -18,11 +18,11 @@ def short(phenny, input):
         return
 
     try:
-        req = web.post("http://vtlu.ug/vtluug", {'lurl': url})
-    except (HTTPError, IOError):
+        r = web.post("http://vtlu.ug/vtluug", {'lurl': url})
+    except:
         raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")
 
-    phenny.reply(req)
+    phenny.reply(r)
 short.rule = (['short'], r'(.*)')
 
 if __name__ == '__main__':

@@ -7,7 +7,6 @@ author: mutantmonkey <mutantmonkey@mutantmonkey.in>
 
 from tools import GrumbleError
 from modules import weather
-import urllib.error
 import random
 import metar
 import web
@@ -31,7 +30,7 @@ def tfw(phenny, input, fahrenheit=False, celsius=False):
         bytes = web.get(uri % icao_code)
     except AttributeError:
         raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")
-    except urllib.error.HTTPError:
+    except web.HTTPError:
         phenny.say("WHERE THE FUCK IS THAT? Try another location.")
         return
 

@@ -8,7 +8,7 @@ Licensed under the Eiffel Forum License 2.
 http://inamidst.com/phenny/
 """
 
-import re, urllib.request, urllib.parse, urllib.error
+import re
 import json
 import web
 
@@ -18,20 +18,19 @@ def translate(text, input='auto', output='en'):
         output = output[:-4]
         raw = True
 
-    opener = urllib.request.build_opener()
-    opener.addheaders = [(
-        'User-Agent', 'Mozilla/5.0' +
-        '(X11; U; Linux i686)' +
-        'Gecko/20071127 Firefox/2.0.0.11'
-    )]
-    input = urllib.parse.quote(input)
-    output = urllib.parse.quote(output.encode('utf-8'))
-    text = urllib.parse.quote(text.encode('utf-8'))
+    #opener = urllib.request.build_opener()
+    #opener.addheaders = [(
+    #    'User-Agent', 'Mozilla/5.0' +
+    #    '(X11; U; Linux i686)' +
+    #    'Gecko/20071127 Firefox/2.0.0.11'
+    #)]
+    input = web.quote(input)
+    output = web.quote(output.encode('utf-8'))
+    text = web.quote(text.encode('utf-8'))
 
-    result = opener.open('http://translate.google.com/translate_a/t?' +
+    result = web.get('http://translate.google.com/translate_a/t?' +
         ('client=t&hl=en&sl=%s&tl=%s&multires=1' % (input, output)) +
-        ('&otf=1&ssel=0&tsel=0&uptl=en&sc=1&text=%s' % text)).read()
-    result = result.decode('utf-8')
+        ('&otf=1&ssel=0&tsel=0&uptl=en&sc=1&text=%s' % text))
 
     while ',,' in result:
         result = result.replace(',,', ',null,')

@@ -4,12 +4,11 @@ urbandict.py - urban dictionary module
 author: mutantmonkey <mutantmonkey@mutantmonkey.in>
 """
 
-import urllib.request
-from urllib.error import HTTPError
 from tools import GrumbleError
 import web
 import json
 
 
 def urbandict(phenny, input):
     """.urb <word> - Search Urban Dictionary for a definition."""
 
@@ -19,18 +18,18 @@ def urbandict(phenny, input):
         return
 
     # create opener
-    opener = urllib.request.build_opener()
-    opener.addheaders = [
-        ('User-agent', web.Grab().version),
-        ('Referer', "http://m.urbandictionary.com"),
-    ]
+    #opener = urllib.request.build_opener()
+    #opener.addheaders = [
+    #    ('User-agent', web.Grab().version),
+    #    ('Referer', "http://m.urbandictionary.com"),
+    #]
 
     try:
-        req = opener.open("http://api.urbandictionary.com/v0/define?term={0}"
-                .format(web.quote(word)))
-        data = req.read().decode('utf-8')
+        data = web.get(
+            "http://api.urbandictionary.com/v0/define?term={0}".format(
+                web.quote(word)))
         data = json.loads(data)
-    except (HTTPError, IOError, ValueError):
+    except:
         raise GrumbleError(
             "Urban Dictionary slemped out on me. Try again in a minute.")
 
@@ -39,7 +38,8 @@ def urbandict(phenny, input):
         return
 
     result = data['list'][0]
-    url = 'http://www.urbandictionary.com/define.php?term={0}'.format(web.quote(word))
+    url = 'http://www.urbandictionary.com/define.php?term={0}'.format(
+        web.quote(word))
 
     response = "{0} - {1}".format(result['definition'].strip()[:256], url)
     phenny.say(response)

@@ -10,7 +10,8 @@ modified from Wikipedia module
 author: mutantmonkey <mutantmonkey@mutantmonkey.in>
 """
 
-import re, urllib.request, urllib.parse, urllib.error
+import re
+import web
 import wiki
 
 wikiapi = 'https://vtluug.org/w/api.php?action=query&list=search&srsearch={0}&limit=1&prop=snippet&format=json'
@@ -23,7 +24,7 @@ def vtluug(phenny, input):
     if not origterm:
         return phenny.say('Perhaps you meant ".vtluug VT-Wireless"?')
 
-    term = urllib.parse.unquote(origterm)
+    term = web.unquote(origterm)
     term = term[0].upper() + term[1:]
     term = term.replace(' ', '_')
 
@@ -31,7 +32,7 @@ def vtluug(phenny, input):
 
     try:
         result = w.search(term)
-    except IOError:
+    except web.ConnectionError:
         error = "Can't connect to vtluug.org ({0})".format(wikiuri.format(term))
         return phenny.say(error)

@@ -7,7 +7,7 @@ Licensed under the Eiffel Forum License 2.
 http://inamidst.com/phenny/
 """
 
-import re, urllib.request, urllib.parse, urllib.error
+import re
 import metar
 import web
 from tools import deprecated, GrumbleError
@@ -15,7 +15,7 @@ from tools import deprecated, GrumbleError
 r_from = re.compile(r'(?i)([+-]\d+):00 from')
 
 def location(name):
-    name = urllib.parse.quote(name)
+    name = web.quote(name)
     uri = 'http://ws.geonames.org/searchJSON?q=%s&maxRows=1' % name
     for i in range(10):
         bytes = web.get(uri)
@@ -81,7 +81,7 @@ def f_weather(phenny, input):
         bytes = web.get(uri % icao_code)
     except AttributeError:
         raise GrumbleError('OH CRAP NOAA HAS GONE DOWN THE WEB IS BROKEN')
-    except urllib.error.HTTPError:
+    except web.HTTPError:
         phenny.say("No NOAA data available for that location.")
         return
 

@@ -7,7 +7,8 @@ Licensed under the Eiffel Forum License 2.
 http://inamidst.com/phenny/
 """
 
-import re, urllib.request, urllib.parse, urllib.error, gzip, io
+import re
+import web
 import wiki
 
 wikiapi = 'https://en.wikipedia.org/w/api.php?action=query&list=search&srsearch={0}&limit=1&prop=snippet&format=json'
@@ -20,7 +21,7 @@ def wik(phenny, input):
     if not origterm:
         return phenny.say('Perhaps you meant ".wik Zen"?')
 
-    term = urllib.parse.unquote(origterm)
+    term = web.unquote(origterm)
     term = term[0].upper() + term[1:]
     term = term.replace(' ', '_')
 
@@ -28,7 +29,7 @@ def wik(phenny, input):
 
     try:
         result = w.search(term)
-    except IOError:
+    except web.ConnectionError:
         error = "Can't connect to en.wikipedia.org ({0})".format(wikiuri.format(term))
         return phenny.say(error)

@@ -6,7 +6,6 @@ Copyright 2012, Randy Nance, randynance.info
 http://github.com/randynobx/phenny/
 """
 
-from urllib.error import URLError, HTTPError
 from tools import GrumbleError
 import re
 import web
@@ -19,7 +18,7 @@ def wuvt(phenny, input) :
     try:
         playing = web.get('http://www.wuvt.vt.edu/playlists/latest_track.php')
         djpage = web.get('http://www.wuvt.vt.edu/playlists/current_dj.php')
-    except (URLError, HTTPError):
+    except:
         raise GrumbleError('Cannot connect to wuvt')
     play = r_play.search(playing)
     song = play.group(2)

web.py
@@ -5,50 +5,41 @@ Author: Sean B. Palmer, inamidst.com
 About: http://inamidst.com/phenny/
 """
 
-import re, urllib.request, urllib.parse, urllib.error
-from html.entities import name2codepoint
+import re
+import urllib.parse
+import requests
 import json as jsonlib
 
-class Grab(urllib.request.URLopener):
-    def __init__(self, *args):
-        self.version = 'Mozilla/5.0 (Phenny)'
-        urllib.request.URLopener.__init__(self, *args)
-    def http_error_default(self, url, fp, errcode, errmsg, headers):
-        return urllib.addinfourl(fp, [headers, errcode], "http:" + url)
-urllib.request._urlopener = Grab()
+from requests.exceptions import ConnectionError, HTTPError, InvalidURL
+from html.entities import name2codepoint
+from urllib.parse import quote, unquote
 
-def get(uri):
+user_agent = "Mozilla/5.0 (Phenny)"
+default_headers = {'User-Agent': user_agent}
+
+def get(uri, headers={}, verify=True, **kwargs):
     if not uri.startswith('http'):
         return
-    u = urllib.request.urlopen(uri)
-    bytes = u.read()
-    try:
-        bytes = bytes.decode('utf-8')
-    except UnicodeDecodeError:
-        bytes = bytes.decode('ISO-8859-1')
-    u.close()
-    return bytes
+    headers.update(default_headers)
+    r = requests.get(uri, headers=headers, verify=verify, **kwargs)
+    r.raise_for_status()
+    return r.text
 
-def head(uri):
+def head(uri, headers={}, verify=True, **kwargs):
     if not uri.startswith('http'):
         return
-    u = urllib.request.urlopen(uri)
-    info = u.info()
-    u.close()
-    return info
+    headers.update(default_headers)
+    r = requests.head(uri, headers=headers, verify=verify, **kwargs)
+    r.raise_for_status()
+    return r.headers
 
-def post(uri, query):
+def post(uri, data, headers={}, verify=True, **kwargs):
     if not uri.startswith('http'):
         return
-    data = urllib.parse.urlencode(query).encode('utf-8')
-    u = urllib.request.urlopen(uri, data)
-    bytes = u.read()
-    try:
-        bytes = bytes.decode('utf-8')
-    except UnicodeDecodeError:
-        bytes = bytes.decode('ISO-8859-1')
-    u.close()
-    return bytes
+    headers.update(default_headers)
+    r = requests.post(uri, data=data, headers=headers, verify=verify, **kwargs)
+    r.raise_for_status()
+    return r.text
 
 r_entity = re.compile(r'&([^;\s]+);')
 
@@ -62,9 +53,6 @@ def entity(match):
         return chr(name2codepoint[value])
     return '[' + value + ']'
 
-def quote(text):
-    return urllib.parse.quote(text)
-
 def decode(html):
     return r_entity.sub(entity, html)
 

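Taken together, web.get, web.head, and web.post are now thin requests wrappers that return r.text (or r.headers) and raise requests exceptions, which is why the modules above swap urllib.error.HTTPError and IOError for web.HTTPError and web.ConnectionError. A minimal sketch of the calling convention a ported module follows, assuming phenny's module loader; the command name and URL are hypothetical:

    import web
    from tools import GrumbleError

    def fetchdemo(phenny, input):
        """.fetchdemo - fetch a plain-text resource via the requests-backed helpers."""
        try:
            # web.get returns the decoded body (r.text) and raises on 4xx/5xx
            text = web.get('http://example.com/index.txt')
        except (web.HTTPError, web.ConnectionError):
            raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")
        phenny.reply(text.strip())

    fetchdemo.commands = ['fetchdemo']
    fetchdemo.priority = 'low'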