I am getting this error: selenium.common.exceptions.InvalidArgumentException: Message: invalid argument: 'URL' must be a string
When I run my code it scrapes the first title, but when it tries to scrape the second title it raises the error saying the URL must be a string. This is the page link: https://www.google.com/maps/search/uk dentist/@31.5688259,74.2388013,12z/data=!3m1!4b1
import time
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
# Configure the Chrome session before launching the browser.
options = webdriver.ChromeOptions()
# options.add_argument("--headless")
for chrome_flag in (
    "--no-sandbox",
    "--disable-gpu",
    "--window-size=1920x1080",
    "--disable-extensions",
):
    options.add_argument(chrome_flag)

# webdriver-manager downloads a chromedriver matching the installed Chrome.
chrome_driver = webdriver.Chrome(
    service=Service(ChromeDriverManager().install()),
    options=options,
)
def supplyvan_scraper():
    """Scrape place titles from a Google Maps search results page.

    Collects the href of every result-card anchor on the search page,
    then visits each link in turn and prints the place title.
    Uses the module-level ``chrome_driver``; the ``with`` block quits
    the browser when the function finishes.
    """
    with chrome_driver as driver:
        driver.implicitly_wait(15)
        URL = 'https://www.google.com/maps/search/dentist uk/@31.5688259,74.2388013,12z/data=!3m1!4b1'
        driver.get(URL)
        time.sleep(3)  # allow the JS-rendered result cards to load

        # BUG FIX: get_attribute('href') returns None for anchors without
        # an href, and driver.get(None) raises InvalidArgumentException
        # ("'url' must be a string") — the error reported above.  Keep
        # only truthy (string) links.
        page_links = [
            href
            for element in driver.find_elements(
                By.XPATH, "//div[@class='Nv2PK Q2HXcd THOPZb']//a")
            for href in (element.get_attribute('href'),)
            if href
        ]

        # Visit each result page and print its title.
        for link in page_links:
            driver.get(link)
            time.sleep(2)
            title = driver.find_element(
                By.XPATH, "//h1[@class='DUwDvf fontHeadlineLarge']//span").text
            print(title)
            # driver.back()
            time.sleep(2)
        time.sleep(2)
        # Redundant with the context manager, but harmless to call twice.
        driver.quit()


supplyvan_scraper()
CodePudding user response:
In my case, the title element selector needed a small adjustment; apart from that, everything works fine.
import time
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
import pandas as pd
# Build the Chrome options used for the scraping session.
options = webdriver.ChromeOptions()
# options.add_argument("--headless")
chrome_flags = [
    "--no-sandbox",
    "--disable-gpu",
    "--window-size=1920x1080",
    "--disable-extensions",
]
for flag in chrome_flags:
    options.add_argument(flag)

# webdriver-manager fetches the driver binary matching the local Chrome.
chrome_driver = webdriver.Chrome(
    service=Service(ChromeDriverManager().install()),
    options=options,
)

# Scraped place titles, appended to by supplyvan_scraper().
data = []
def supplyvan_scraper():
    """Visit each dentist result on the Google Maps search page and
    append its title to the module-level ``data`` list.

    Uses the module-level ``chrome_driver``; the ``with`` block quits
    the browser when the function finishes.
    """
    with chrome_driver as driver:
        driver.implicitly_wait(15)
        URL = 'https://www.google.com/maps/search/dentist uk/@31.5688259,74.2388013,12z/data=!3m1!4b1'
        driver.get(URL)
        time.sleep(3)  # let the JS-rendered result cards appear

        # BUG FIX: '//*[@]' is not a valid XPath (the class attribute was
        # lost when the answer was posted); select the result-card anchors
        # by class, as in the question.  Also drop anchors whose href is
        # None — driver.get(None) raises InvalidArgumentException
        # ("'url' must be a string").
        page_links = [
            href
            for element in driver.find_elements(
                By.XPATH, "//div[@class='Nv2PK Q2HXcd THOPZb']//a")
            if (href := element.get_attribute('href'))
        ]

        # visit all the links
        for link in page_links:
            print(link)
            driver.get(link)
            time.sleep(2)
            # '//h1[@]/span[1]' likewise lost its attribute; use the
            # headline class from the place detail page.
            title = driver.find_element(
                By.XPATH, "//h1[@class='DUwDvf fontHeadlineLarge']/span[1]").text
            data.append(title)
            # driver.back()
            time.sleep(2)


supplyvan_scraper()

# Present the scraped titles as a one-column DataFrame.
df = pd.DataFrame(data, columns=['title'])
print(df)
Output:
title
0 YOR Dental at MediaCityUK
1 Blossom Dental Care & Implant Studio
2 Blackbrook Dental Practice
3 Greenwich Dental Practice
4 NHS Dentist
5 London Dental Centre
6 New Cross Dental Practice
7 Dental Works
8 Huntingdon Dental Care
9 Advance Dental Care - Private & NHS | Invisali...