This repository was archived by the owner on Apr 24, 2025. It is now read-only.
File tree Expand file tree Collapse file tree
projects/goodreads-quotes-scraper Expand file tree Collapse file tree Original file line number Diff line number Diff line change @@ -131,6 +131,36 @@ def getQuotesByPageNumber(url):
131131 print ("Tag not found." )
132132
133133
# Not implemented!
def getTopQuotesByAuthor(url):
    """Placeholder for fetching top quotes for a given author from Goodreads.

    Not implemented: Goodreads attaches a random numeric id (u_id) to each
    author URL, so a real implementation would first need to scrape a map of
    author name -> u_id (see https://www.goodreads.com/author/on_goodreads)
    and look the chosen author up in that map before any quotes page can be
    requested.

    Args:
        url: Base Goodreads quotes URL (currently unused by this stub).

    Returns:
        None. The function performs no work yet.
    """
    # Sketch of the intended flow, preserved for a future implementation:
    #   author = str(input("Author please.."))
    #   data = getResponse(url + "/tag/" + <author-specific path>)   # path needs the author's u_id
    #   soup = BeautifulSoup(data, 'html.parser')
    #   quotes  = [q.text.strip().split("\n  ")[0]
    #              for q in soup.find_all('div', {'class': 'quoteText'})]
    #   authors = [q.text.strip().split("\n  ")[2]
    #              for q in soup.find_all('div', {'class': 'quoteText'})]
    #   for q, a in zip(quotes, authors):
    #       print(q.strip(' ') + " -- " + a.strip(' '))
    return None
134164def main ():
135165 base_url = "https://www.goodreads.com/quotes"
136166
You can’t perform that action at this time.
0 commit comments