# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def authorize(auth_url, user, password):
    """Log into an eduVPN user portal with MechanicalSoup and navigate to the
    OAuth authorize page.

    auth_url -- portal URL opened first
    user, password -- credentials filled into the portal login form
    Raises EduvpnAuthException when the portal presents a TOTP form after
    login (two-factor enabled).

    NOTE(review): this chunk ends right after selecting the approval form;
    the final submit presumably happens past this view — confirm.
    """
    browser = mechanicalsoup.StatefulBrowser(raise_on_404=True)
    logger.info("opening auth_url")
    response = browser.open(auth_url)
    # NOTE(review): assert is stripped under `python -O`; explicit raises
    # would make these response checks reliable.
    assert(response.ok)
    browser.select_form('form[action="/vpn-user-portal/_form/auth/verify"]')
    browser["userName"] = user
    browser["userPass"] = password
    logger.info("logging in")
    response = browser.submit_selected()
    assert(response.ok)
    form = browser.select_form()
    # A TOTP verify form immediately after login means 2FA is enabled.
    if 'action' in form.form.attrs and form.form.attrs['action'] == '/vpn-user-portal/_two_factor/auth/verify/totp':
        raise EduvpnAuthException("otp enabled")
    assert(urlparse(browser.get_url()).path == "/vpn-user-portal/_oauth/authorize")  # make sure is the right page
    form = browser.select_form()
    # NOTE(review): bs4 `select` returns matching tags and this result is
    # discarded, so the "yes" button is not actually chosen here —
    # MechanicalSoup's `choose_submit` is the API that picks a submit button.
    # Verify against the (unseen) continuation of this function.
    form.form.select('button[value="yes"]')
    logger.info("authorising app")
def test_link_arg_multiregex(httpbin):
    """follow_link must refuse a positional link filter combined with url_regex."""
    sb = mechanicalsoup.StatefulBrowser()
    sb.open_fake_page('<a href="/get">Link</a>', httpbin.url)
    with pytest.raises(ValueError, match="link parameter cannot be .*"):
        sb.follow_link('foo', url_regex='bar')
def test_launch_browser(mocker):
    """With set_debug(True), each failed lookup should pop the page in a
    web browser exactly once (webbrowser.open is mocked out)."""
    sb = mechanicalsoup.StatefulBrowser()
    sb.set_debug(True)
    sb.open_fake_page('')
    mocker.patch('webbrowser.open')

    def expect_popup(action, target):
        with pytest.raises(mechanicalsoup.LinkNotFoundError):
            action(target)
        # mock.assert_called_once() not available on some versions :-(
        assert webbrowser.open.call_count == 1

    expect_popup(sb.follow_link, 'nosuchlink')
    mocker.resetall()
    expect_popup(sb.select_form, 'nosuchlink')
def fill_form(args):
    """Interactively collect booking details for the DCU room-booking form.

    args -- sequence of [room, "D/M/Y" date, "HHMM" start, "HHMM" end]
    (the date layout is assumed from the split below — TODO confirm).

    NOTE(review): this chunk ends right after opening the booking page; the
    collected name/person/email/number values are presumably submitted in
    code past this view — confirm.
    """
    browser = StatefulBrowser()
    cookie_jar = LWPCookieJar()
    browser.set_cookiejar(cookie_jar)
    # Unpack the args: "HHMM" strings become "HH:MM", the date is split on "/".
    room, date, from_time, to_time = args[0], args[1].split(
        "/"), args[2][:2] + ":" + args[2][2:], args[3][:2] + ":" + args[3][2:]
    day, month, year = date[0], date[1], date[2]
    # raw_input exists only on Python 2; input() is the Python 3 equivalent.
    if sys.version_info[0] < 3:
        name = raw_input("Name of society: ")
        person = raw_input("Your name: ")
        email = raw_input("Your email: ")
        number = raw_input("Your number: ")
    else:
        name = input("Name of society: ")
        person = input("Your name: ")
        email = input("Your email: ")
        number = input("Your number: ")
    browser.open("http://www.dcu.ie/registry/booking.shtml")
def get_codename(device, version, build):
    """Scrape theiphonewiki Firmware_Keys index for the codename matching a
    device/build pair.

    NOTE(review): this chunk is cut off — the `try` below has no visible
    `except`, and `i` is initialised but never incremented inside the loop;
    both presumably live past this view — confirm against the full file.
    """
    # Only the major version appears in the wiki URL, e.g. "14.x".
    version = version.split('.')[0] + ".x"
    url = "https://www.theiphonewiki.com/wiki/Firmware_Keys/" + version
    br = mechanicalsoup.StatefulBrowser()
    html = br.open(url) #.read()
    i = 0
    checker = False
    # All anchors on the index page; hrefs apparently follow a
    # <codename>_<build>_(<device>) pattern — TODO confirm.
    data = br.get_current_page().find_all('a')
    device = "(%s)" % device
    for hit in data:
        # some beta may have the same codename, first in first out
        if checker is False:
            try:
                if data[i].get('href').split('_')[1] == build and data[i].get('href').split('_')[2] == device:
                    checker = True
                    codename = data[i].get('href').split('/')[2].split('_')[0]
                    return codename
def __repr__(
    fitur=None,
    uag=None,
):
    """Build and return a StatefulBrowser configured with the given soup
    parser settings and user agent.

    fitur -- BeautifulSoup config dict; defaults to html.parser.
    uag   -- user-agent value; defaults to a fresh useragent() result.

    Fixes vs. original: the mutable dict default is replaced by the
    None-sentinel idiom, and `uag` is now actually passed as user_agent —
    the original passed the `useragent` function object itself and never
    used `uag`.
    """
    if fitur is None:
        fitur = {'features': 'html.parser'}
    if uag is None:
        uag = useragent()
    return StatefulBrowser(
        soup_config=fitur,
        user_agent=uag,
    )
import re
from requests_html import HTMLSession, HTML
from datetime import datetime
from urllib.parse import quote
from lxml.etree import ParserError
import mechanicalsoup
# Module-level HTTP clients shared by the helpers below.
session = HTMLSession()
browser = mechanicalsoup.StatefulBrowser()
# NOTE(review): `addheaders` is the mechanize API; MechanicalSoup's
# StatefulBrowser does not read this attribute, so this header is likely
# never sent — confirm (StatefulBrowser takes `user_agent=` instead).
browser.addheaders = [('User-agent', 'Firefox')]
def get_tweets(query, pages=25):
    """Gets tweets for a given user, via the Twitter frontend API.

    query -- a username, or a '#hashtag' (hashtags are URL-quoted and routed
    to the search timeline rather than the profile timeline).
    pages -- not used in the visible portion; presumably consumed past this
    view — confirm.

    NOTE(review): this chunk is cut off mid-way through the `headers`
    literal; the actual request happens past this view.
    """
    after_part = f'include_available_features=1&include_entities=1&include_new_items_bar=true'
    if query.startswith('#'):
        query = quote(query)
        url = f'https://twitter.com/i/search/timeline?f=tweets&vertical=default&q={query}&src=tyah&reset_error_state=false&'
    else:
        url = f'https://twitter.com/i/profiles/show/{query}/timeline/tweets?'
    url += after_part
    headers = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Referer': f'https://twitter.com/{query}',
def __init__(self, logindata, chatter):
    """Store credentials, create the browser, and, in debug mode, route
    cleansed mechanize logs into a fresh temporary directory."""
    self.logindata = logindata
    self.browser = mechanicalsoup.StatefulBrowser()
    self.quiet = chatter['quiet']
    self.debug = chatter['debug']
    if not self.debug:
        return
    # make a directory for debugging output
    self.debugdir = tempfile.mkdtemp(prefix='aib2ofx_')
    print('WARNING: putting *sensitive* debug data in %s' %
          self.debugdir)
    self.logger = logging.getLogger("mechanize")
    handler = logging.FileHandler(self.debugdir + '/mechanize.log', 'w')
    handler.setFormatter(CleansingFormatter('%(asctime)s\n%(message)s'))
    self.logger.addHandler(handler)
    self.logger.setLevel(logging.DEBUG)
    # FIXME: better logging for page *content*
def get_google_trends(un, pw):
    """Log into a Google account and download a trends CSV as a DataFrame.

    un, pw -- Google account email and password.

    NOTE(review): this mixes two browser APIs — `mechanize` has no
    `StatefulBrowser` (that class is MechanicalSoup's), editing soup `value`
    attributes does not fill a MechanicalSoup form, and `form.click()` is the
    mechanize form API. Confirm which library is actually bound to the name
    `mechanize` before trusting this path; also note the comment below says
    the trends export endpoint no longer returns tabular data.
    """
    br = mechanize.StatefulBrowser()
    br.addheaders = [
        ('User-agent',
         'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1'
         )]
    response = br.open('https://accounts.google.com/ServiceLogin?hl=en&continue=https://www.google.com/')
    # Fill the login inputs directly in the parsed soup.
    response.soup.find_all("input", {"id": "Email"})[0]['value'] = un
    response.soup.find_all("input", {"id": "Passwd-hidden"})[0]['value'] = pw
    form = response.soup.select("form")[0]
    print(form)
    form_response = br.open(form.click())
    print(form_response)
    # google no longer provides tabular trends data:
    table = br.open("http://www.google.com/trends/trendsReport?q=SearchTerm&export=1")
    return pd.read_csv(io.StringIO(table.text))
def build_timetable(self, room):
    """Fetch the DCU timetable feed for a Glasnevin ("GLA.") room and run
    the room check on it.

    room -- room code appended to "GLA." in the feed URL.
    Returns whatever utils.check_room(url) returns (defined elsewhere).
    """
    browser = StatefulBrowser()
    cookie_jar = LWPCookieJar()
    browser.set_cookiejar(cookie_jar)
    # NOTE(review): MechanicalSoup expects `user_agent` to be a string; this
    # mechanize-style list of tuples is likely ignored — confirm.
    browser.user_agent = [
        ("User-Agent",
         """Mozilla/5.0 (Windows NT 10.0; Win64; x64)
AppleWebKit/537.36 (KHTML, like Gecko)
Chrome/58.0.3029.110
Safari/537.36""")]
    # self.week / self.hour / self.day are presumably set on the instance
    # elsewhere — TODO confirm.
    url = "http://www101.dcu.ie/timetables/feed.php?room=GLA." + \
        room + "&week1=" + self.week + \
        "&hour=" + str(self.hour) + \
        "&day=" + self.day + "&template=location"
    # NOTE(review): verify=False disables TLS verification; harmless for this
    # plain-http URL, but drop the flag if the feed ever moves to https.
    browser.open(url, verify=False)
    # NOTE(review): the fetched page is never read; check_room receives only
    # the URL, so the browser.open above may be redundant — confirm.
    result = utils.check_room(url)
    return result