@@ -448,14 +448,16 @@ This key could be used in conjunction with `playwright_include_page` to make a c
 requests using the same page. For instance:

 ```python
+from playwright.async_api import Page
+
 def start_requests(self):
     yield scrapy.Request(
         url="https://httpbin.org/get",
         meta={"playwright": True, "playwright_include_page": True},
     )

 def parse(self, response, **kwargs):
-    page = response.meta["playwright_page"]
+    page: Page = response.meta["playwright_page"]
     yield scrapy.Request(
         url="https://httpbin.org/headers",
         callback=self.parse_headers,
@@ -496,6 +498,20 @@ def parse(self, response, **kwargs):
     # {'issuer': 'DigiCert TLS RSA SHA256 2020 CA1', 'protocol': 'TLS 1.3', 'subjectName': 'www.example.org', 'validFrom': 1647216000, 'validTo': 1678838399}
 ```

+### `playwright_suggested_filename`
+Type `Optional[str]`, read only
+
+The value of the [`Download.suggested_filename`](https://playwright.dev/python/docs/api/class-download#download-suggested-filename)
+attribute when the response is the binary contents of a
+[download](https://playwright.dev/python/docs/downloads) (e.g. a PDF file).
+Only available for responses that only caused a download. It can be accessed
+in the callback via `response.meta['playwright_suggested_filename']`.
+
+```python
+def parse(self, response, **kwargs):
+    print(response.meta["playwright_suggested_filename"])
+    # 'sample_file.pdf'
+```

 ## Receiving Page objects in callbacks

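To show how this new meta key might be put to use beyond printing it, here is a minimal sketch that saves the downloaded bytes under the browser-suggested name. The spider name, start URL, and `downloads/` output directory are illustrative assumptions, not part of scrapy-playwright:

```python
from pathlib import Path

import scrapy


class FileDownloadSpider(scrapy.Spider):
    # Hypothetical spider used only for this sketch.
    name = "file_download_sketch"

    def start_requests(self):
        # Assumed to be a URL whose navigation only triggers a download.
        yield scrapy.Request(
            url="https://example.org/sample_file.pdf",
            meta={"playwright": True},
        )

    def parse(self, response, **kwargs):
        # For download-only responses, response.body holds the downloaded bytes
        # and the meta key carries the filename suggested by the browser.
        filename = response.meta.get("playwright_suggested_filename") or "download.bin"
        target = Path("downloads") / filename  # output directory is an assumption
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_bytes(response.body)
        yield {"file": str(target), "size": len(response.body)}
```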
@@ -514,6 +530,7 @@ necessary the spider job could get stuck because of the limit set by the
 `PLAYWRIGHT_MAX_PAGES_PER_CONTEXT` setting.

 ```python
+from playwright.async_api import Page
 import scrapy

 class AwesomeSpiderWithPage(scrapy.Spider):
@@ -528,7 +545,7 @@ class AwesomeSpiderWithPage(scrapy.Spider):
         )

     def parse_first(self, response):
-        page = response.meta["playwright_page"]
+        page: Page = response.meta["playwright_page"]
         return scrapy.Request(
             url="https://example.com",
             callback=self.parse_second,
@@ -537,13 +554,13 @@ class AwesomeSpiderWithPage(scrapy.Spider):
         )

     async def parse_second(self, response):
-        page = response.meta["playwright_page"]
+        page: Page = response.meta["playwright_page"]
         title = await page.title()  # "Example Domain"
         await page.close()
         return {"title": title}

     async def errback_close_page(self, failure):
-        page = failure.request.meta["playwright_page"]
+        page: Page = failure.request.meta["playwright_page"]
         await page.close()
 ```

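For context on the limit mentioned above, `PLAYWRIGHT_MAX_PAGES_PER_CONTEXT` is configured like any other Scrapy setting. A minimal sketch of adjusting it in a project's `settings.py` follows; the value shown is arbitrary, not a recommended default:

```python
# settings.py -- the value below is illustrative, not a recommended default
PLAYWRIGHT_MAX_PAGES_PER_CONTEXT = 8  # max concurrent pages per browser context
```

Pages returned via `playwright_include_page` count against this limit until they are closed, which is why the callbacks and the errback in the example above close them explicitly.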