Only the first result is being written to the CSV, with one letter of the URL per row, instead of all of the URLs being written, one per row.
What am I doing wrong in the last section of this code that causes the CSV to contain only one of the results instead of all of them?
import requests
from bs4 import BeautifulSoup
import csv


def grab_listings():
    url = ("http://www.gym-directory.com/listing-category/gyms-fitness-centres/")
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    l_area = soup.find("div", {"class": "wlt_search_results"})
    for elem in l_area.findAll("a", {"class": "frame"}):
        return elem["href"]
    url = ("http://www.gym-directory.com/listing-category/gyms-fitness-centres/page/2/")
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    l_area = soup.find("div", {"class": "wlt_search_results"})
    for elem in l_area.findAll("a", {"class": "frame"}):
        return elem["href"]
    url = ("http://www.gym-directory.com/listing-category/gyms-fitness-centres/page/3/")
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    l_area = soup.find("div", {"class": "wlt_search_results"})
    for elem in l_area.findAll("a", {"class": "frame"}):
        return elem["href"]
    url = ("http://www.gym-directory.com/listing-category/gyms-fitness-centres/page/4/")
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    l_area = soup.find("div", {"class": "wlt_search_results"})
    for elem in l_area.findAll("a", {"class": "frame"}):
        return elem["href"]
    url = ("http://www.gym-directory.com/listing-category/gyms-fitness-centres/page/5/")
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    l_area = soup.find("div", {"class": "wlt_search_results"})
    for elem in l_area.findAll("a", {"class": "frame"}):
        return elem["href"]
    url = ("http://www.gym-directory.com/listing-category/gyms-fitness-centres/page/6/")
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    l_area = soup.find("div", {"class": "wlt_search_results"})
    for elem in l_area.findAll("a", {"class": "frame"}):
        return elem["href"]
    url = ("http://www.gym-directory.com/listing-category/gyms-fitness-centres/page/7/")
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    l_area = soup.find("div", {"class": "wlt_search_results"})
    for elem in l_area.findAll("a", {"class": "frame"}):
        return elem["href"]
    url = ("http://www.gym-directory.com/listing-category/gyms-fitness-centres/page/8/")
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    l_area = soup.find("div", {"class": "wlt_search_results"})
    for elem in l_area.findAll("a", {"class": "frame"}):
        return elem["href"]
    url = ("http://www.gym-directory.com/listing-category/gyms-fitness-centres/page/9/")
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    l_area = soup.find("div", {"class": "wlt_search_results"})
    for elem in l_area.findAll("a", {"class": "frame"}):
        return elem["href"]
l = grab_listings()

with open("gyms.csv", "wb") as file:
    writer = csv.writer(file)
    for row in l:
        writer.writerow(row)
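The likely culprit is the `return` inside the first for loop: it exits grab_listings() on the very first anchor it sees, so everything after it is dead code, and the function hands back a single href string rather than a collection of them. Iterating over that string in the writer loop then yields one character at a time, which is exactly the "one letter per row" symptom. Below is a minimal sketch of one way to restructure it, assuming Python 3 (hence opening the file in text mode with newline="" rather than "wb"); the page range 1 through 9 simply mirrors the hard-coded URLs above:

import csv
import requests
from bs4 import BeautifulSoup


def grab_listings():
    # Accumulate hrefs from every page instead of returning on the first match.
    links = []
    base = "http://www.gym-directory.com/listing-category/gyms-fitness-centres/"
    for page in range(1, 10):  # pages 1-9, matching the original hard-coded URLs
        url = base if page == 1 else "{}page/{}/".format(base, page)
        r = requests.get(url)
        soup = BeautifulSoup(r.text, "html.parser")
        l_area = soup.find("div", {"class": "wlt_search_results"})
        if l_area is None:  # skip a page whose results container is missing
            continue
        for elem in l_area.find_all("a", {"class": "frame"}):
            links.append(elem["href"])
    return links


links = grab_listings()
with open("gyms.csv", "w", newline="") as f:  # text mode for Python 3's csv module
    writer = csv.writer(f)
    for href in links:
        writer.writerow([href])  # wrap in a list so the whole URL lands in one cell

Wrapping each href in a list before writerow() matters too: writer.writerow(href) treats the string itself as the row's sequence of fields, one character per field, while writer.writerow([href]) writes the whole URL as a single cell.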