I've been trying to scrape an ASP.NET site that has a lot of postback functions to access pages. To get the arguments I usually just inspect the element and use the element value and id for __EVENTTARGET and __EVENTARGUMENT but this time it didn't work, I'm guessing because I got the arguments wrong or I'm posting to the wrong page. Looking at the network tab shows the arguments as blank. Here's what I've been trying:
'''
import requests
from bs4 import BeautifulSoup
import request_url
import pandas as pd
from database import Database
from database import CursorFromConnectionFromPool
from psycopg2 import sql
import json
import datetime
from nameparser import HumanName
from nameparser.config import CONSTANTS
import html
import re
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from database_tables import DatabaseTables
import requests
from bs4 import BeautifulSoup
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'}
target_url = 'http://alisondb.legislature.state.al.us/Alison/SESSResosByHouseSponsorSelect.aspx'
select_session_url = 'http://alisondb.legislature.state.al.us/Alison/SelectSession.aspx'
session = requests.Session()
# change event argument to change session year, currently set to 2019
session_payload = {"__EVENTTARGET":"ctl00$ContentPlaceHolder1$gvSessions", "__EVENTARGUMENT": "$3"}
session.post(select_session_url, session_payload, headers)
# Set session By Sponsor
session_payload = {"__EVENTTARGET":'ctl00$ContentPlaceHolder1$btnSponsor', "__EVENTARGUMENT": "ContentPlaceHolder1_btnSponsor"}
session.post('http://alisondb.legislature.state.al.us/Alison/SESSResosByHouseSponsorSelect.aspx', session_payload, headers)
sponsor_url = 'http://alisondb.legislature.state.al.us/Alison/SESSResosList.aspx?NAME=Allen&SPONSOROID=100571&BODY=1755&SESSNAME=Regular%20Session%202019'
# Set session By Sponsor
session_payload = {"__EVENTTARGET":'ctl00$ContentPlaceHolder1$ImageButton2', "__EVENTARGUMENT": "ContentPlaceHolder1_ImageButton2"}
session.post('http://alisondb.legislature.state.al.us/Alison/SESSResosByHouseSponsorSelect.aspx', session_payload, headers)
page = session.get(sponsor_url)
member_soup = BeautifulSoup(page.text, 'lxml')
member = member_soup.find('body')
print(member.text)
'''
question from:
https://stackoverflow.com/questions/65926100/web-scraping-getting-dopostback-arguments 与恶龙缠斗过久,自身亦成为恶龙;凝视深渊过久,深渊将回以凝视…