@@ -117,7 +117,7 @@ def _atomic_download(url: str, dest: pathlib.Path):
 
 
 def _download_archive(url: str, archive_path: pathlib.Path) -> bool:
-    """Streaming download with retry + resume support."""
+    """Robust streaming download with retries."""
 
     logger.debug("Archive will be saved to: %s", archive_path)
 
@@ -130,80 +130,32 @@ def _download_archive(url: str, archive_path: pathlib.Path) -> bool:
     )
     session.mount("https://", HTTPAdapter(max_retries=retries))
 
-    # ------------------------------------------------------------
-    # 1. Detect total file size (HEAD is broken on Qualcomm)
-    # ------------------------------------------------------------
     try:
-        # NOTE:
-        # Qualcomm's download endpoint does not return accurate metadata on HEAD requests.
-        # Many Qualcomm URLs first redirect to an HTML "wrapper" page (typically ~134 bytes),
-        # and the HEAD request reflects *that wrapper* rather than the actual ZIP archive.
-        #
-        # Example:
-        #   HEAD -> Content-Length: 134, Content-Type: text/html
-        #   GET  -> Content-Length: 1354151797, Content-Type: application/zip
-        #
-        # Because Content-Length from HEAD is frequently incorrect, we fall back to issuing
-        # a GET request with stream=True to obtain the real Content-Length without downloading
-        # the full file. This ensures correct resume logic and size validation.
-        r_head = session.get(url, stream=True)
-        r_head.raise_for_status()
-
-        if "content-length" not in r_head.headers:
-            logger.error("Server did not return content-length!")
-            return False
-
-        total_size = int(r_head.headers["content-length"])
-    except Exception as e:
-        logger.exception("Failed to determine file size: %s", e)
-        return False
-
-    # ------------------------------------------------------------
-    # 2. If partial file exists, resume
-    # ------------------------------------------------------------
-    downloaded = archive_path.stat().st_size if archive_path.exists() else 0
-    if downloaded > total_size:
-        logger.warning("Existing file is larger than expected. Removing.")
-        archive_path.unlink()
-        downloaded = 0
-
-    logger.info("Resuming download from %d / %d bytes", downloaded, total_size)
-
-    headers = {}
-    if downloaded > 0:
-        headers["Range"] = f"bytes={downloaded}-"
-
-    try:
-        # resume GET
-        with session.get(url, stream=True, headers=headers) as r:
+        with session.get(url, stream=True) as r:
             r.raise_for_status()
 
+            downloaded = 0
             chunk_size = 1024 * 1024  # 1MB
-            mode = "ab" if downloaded > 0 else "wb"
 
-            with open(archive_path, mode) as f:
+            with open(archive_path, "wb") as f:
                 for chunk in r.iter_content(chunk_size):
                     if chunk:
                         f.write(chunk)
                         downloaded += len(chunk)
 
+        logger.info("Download completed!")
+
     except Exception as e:
         logger.exception("Error during download: %s", e)
         return False
 
-    # ------------------------------------------------------------
-    # 3. Validate final size
-    # ------------------------------------------------------------
-    final_size = archive_path.stat().st_size
-    if final_size != total_size:
-        logger.error(
-            "Download incomplete: expected %d, got %d",
-            total_size,
-            final_size,
-        )
+    if archive_path.exists() and archive_path.stat().st_size == 0:
+        logger.warning("Downloaded file is empty!")
+        return False
+    elif not archive_path.exists():
+        logger.error("File was not downloaded!")
         return False
 
-    logger.info("Download completed successfully!")
     return True
 
 
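Note on the removed NOTE block: it documents a real quirk of the endpoint. A HEAD request reports the ~134-byte HTML wrapper page, so only a GET sees the archive's true size. Because requests with stream=True returns as soon as the response headers arrive, the real Content-Length can be read without pulling the body. A minimal sketch of that probe, assuming a requests.Session configured like the one above; probe_content_length is a hypothetical helper, not part of this commit:

import requests


def probe_content_length(session: requests.Session, url: str) -> int | None:
    """Hypothetical helper, not part of this commit."""
    # With stream=True, requests returns once the response headers arrive,
    # so the GET's real Content-Length is readable without pulling the body.
    r = session.get(url, stream=True)
    try:
        r.raise_for_status()
        length = r.headers.get("content-length")
        return int(length) if length is not None else None
    finally:
        r.close()  # drop the connection without consuming the body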
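For reference, the resume logic this commit drops reduces to a Range header plus append mode. Below is a standalone sketch under the same assumptions (the server honors Range requests; total_size comes from a probe like the one above); resume_download is a hypothetical name, not the commit's code:

import pathlib

import requests


def resume_download(session: requests.Session, url: str,
                    dest: pathlib.Path, total_size: int) -> bool:
    """Hypothetical sketch of Range-based resume; not the commit's code."""
    done = dest.stat().st_size if dest.exists() else 0
    if done > total_size:  # stale or corrupt partial file: start over
        dest.unlink()
        done = 0
    headers = {"Range": f"bytes={done}-"} if done else {}
    with session.get(url, stream=True, headers=headers) as r:
        r.raise_for_status()
        # A hardened version would also require r.status_code == 206 here:
        # a server that ignores Range replies 200 with the full body,
        # which must not be appended to the partial file.
        with open(dest, "ab" if done else "wb") as f:
            for chunk in r.iter_content(1024 * 1024):
                f.write(chunk)
    return dest.stat().st_size == total_size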