import asyncio
import asyncio.events
import functools
import inspect
import io
import numbers
import os
import re
import threading
from contextlib import contextmanager
from glob import has_magic
from typing import TYPE_CHECKING, Iterable

from .callbacks import DEFAULT_CALLBACK
from .exceptions import FSTimeoutError
from .implementations.local import LocalFileSystem, make_path_posix, trailing_sep
from .spec import AbstractBufferedFile, AbstractFileSystem
from .utils import glob_translate, is_exception, other_paths

private = re.compile("_[^_]")
iothread = [None]  # dedicated fsspec IO thread
loop = [None]  # global event loop for any non-async instance
_lock = None  # global lock placeholder
get_running_loop = asyncio.get_running_loop


def get_lock():
    """Allocate or return a threading lock.

    The lock is allocated on first use to allow setting one lock per forked process.
    """
    global _lock
    if not _lock:
        _lock = threading.Lock()
    return _lock


def reset_lock():
    """Reset the global lock.

    This should be called only on the init of a forked process to reset the lock to
    None, enabling the new forked process to get a new lock.
    """
    global _lock

    iothread[0] = None
    loop[0] = None
    _lock = None


async def _runner(event, coro, result, timeout=None):
    timeout = timeout if timeout else None  # convert 0 or 0.0 to None
    if timeout is not None:
        coro = asyncio.wait_for(coro, timeout=timeout)
    try:
        result[0] = await coro
    except Exception as ex:
        result[0] = ex
    finally:
        event.set()


def sync(loop, func, *args, timeout=None, **kwargs):
    """
    Make the loop run the coroutine until it returns. Runs in another thread.

    Examples
    --------
    >>> fsspec.asyn.sync(fsspec.asyn.get_loop(), func, *args,
                         timeout=timeout, **kwargs)
    """
    timeout = timeout if timeout else None  # convert 0 or 0.0 to None
    # NB: if the loop is not running *yet*, it is OK to submit work
    # and we will wait for it
    if loop is None or loop.is_closed():
        raise RuntimeError("Loop is not running")
    try:
        loop0 = asyncio.events.get_running_loop()
        if loop0 is loop:
            raise NotImplementedError("Calling sync() from within a running loop")
    except NotImplementedError:
        raise
    except RuntimeError:
        pass
    coro = func(*args, **kwargs)
    result = [None]
    event = threading.Event()
    asyncio.run_coroutine_threadsafe(_runner(event, coro, result, timeout), loop)
    while True:
        # this loop allows the thread to be interrupted
        if event.wait(1):
            break
        if timeout is not None:
            timeout -= 1
            if timeout < 0:
                raise FSTimeoutError

    return_result = result[0]
    if isinstance(return_result, asyncio.TimeoutError):
        # suppress asyncio.TimeoutError, raise FSTimeoutError
        raise FSTimeoutError from return_result
    elif isinstance(return_result, BaseException):
        raise return_result
    else:
        return return_result
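
# A minimal usage sketch (hedged): ``double`` below is a hypothetical
# coroutine, not part of this module. ``sync`` blocks the calling
# (non-async) thread until the coroutine completes on the dedicated IO loop:
#
#     async def double(x):
#         return 2 * x
#
#     sync(get_loop(), double, 21)  # -> 42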


def sync_wrapper(func, obj=None):
    """Given a function, make it callable in blocking contexts.

    Leave obj=None if defining within a class. Pass the instance if attaching
    as an attribute of the instance.
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        self = obj or args[0]
        return sync(self.loop, func, *args, **kwargs)

    return wrapper
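
# Hedged sketch: given an instance ``fs`` (illustrative, not defined here)
# that has a ``.loop`` attribute and a coroutine method ``_info``, a blocking
# equivalent can be attached like so:
#
#     fs.info = sync_wrapper(fs._info, obj=fs)
#     fs.info("/some/path")  # now callable without await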


@contextmanager
def _selector_policy():
    original_policy = asyncio.get_event_loop_policy()
    try:
        if os.name == "nt" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
            asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
        yield
    finally:
        asyncio.set_event_loop_policy(original_policy)


def get_loop():
    """Create or return the default fsspec IO loop

    The loop will be running on a separate thread.
    """
    if loop[0] is None:
        with get_lock():
            # repeat the check just in case the loop got filled between the
            # previous two calls from another thread
            if loop[0] is None:
                with _selector_policy():
                    loop[0] = asyncio.new_event_loop()
                th = threading.Thread(target=loop[0].run_forever, name="fsspecIO")
                th.daemon = True
                th.start()
                iothread[0] = th
    return loop[0]
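
# Note: the loop is process-global and runs on a daemon thread, so it is
# shared by all non-async filesystem instances. A hedged sketch of direct use
# (``some_coroutine`` is hypothetical):
#
#     io_loop = get_loop()           # same loop object on every call
#     sync(io_loop, some_coroutine)  # submit work from a blocking context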


if TYPE_CHECKING:
    import resource

    ResourceError = resource.error
else:
    try:
        import resource
    except ImportError:
        resource = None
        ResourceError = OSError
    else:
        ResourceError = getattr(resource, "error", OSError)

_DEFAULT_BATCH_SIZE = 128
_NOFILES_DEFAULT_BATCH_SIZE = 1280


def _get_batch_size(nofiles=False):
    from fsspec.config import conf

    if nofiles:
        if "nofiles_gather_batch_size" in conf:
            return conf["nofiles_gather_batch_size"]
    else:
        if "gather_batch_size" in conf:
            return conf["gather_batch_size"]
    if nofiles:
        return _NOFILES_DEFAULT_BATCH_SIZE
    if resource is None:
        return _DEFAULT_BATCH_SIZE

    try:
        soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
    except (ImportError, ValueError, ResourceError):
        return _DEFAULT_BATCH_SIZE

    if soft_limit == resource.RLIM_INFINITY:
        return -1
    else:
        return soft_limit // 8
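
# The defaults can be overridden globally through fsspec's config dict; a
# hedged example (the keys shown are exactly the ones this function reads):
#
#     import fsspec.config
#     fsspec.config.conf["gather_batch_size"] = 64           # file-touching ops
#     fsspec.config.conf["nofiles_gather_batch_size"] = 512  # metadata-only ops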


def running_async() -> bool:
    """Being executed by an event loop?"""
    try:
        asyncio.get_running_loop()
        return True
    except RuntimeError:
        return False


async def _run_coros_in_chunks(
    coros,
    batch_size=None,
    callback=DEFAULT_CALLBACK,
    timeout=None,
    return_exceptions=False,
    nofiles=False,
):
    """Run the given coroutines in chunks.

    Parameters
    ----------
    coros: list of coroutines to run
    batch_size: int or None
        Number of coroutines to submit/wait on simultaneously.
        If -1, there is no throttling. If None, it will be inferred from
        _get_batch_size()
    callback: fsspec.callbacks.Callback instance
        Gets a relative_update when each coroutine completes
    timeout: number or None
        If given, each coroutine times out after this time. Note that, since
        there are multiple batches, the total run time of this function will in
        general be longer
    return_exceptions: bool
        Same meaning as in asyncio.gather
    nofiles: bool
        If inferring the batch_size, does this operation involve local files?
        If yes, you normally expect smaller batches.
    """
    if batch_size is None:
        batch_size = _get_batch_size(nofiles=nofiles)

    if batch_size == -1:
        batch_size = len(coros)

    assert batch_size > 0

    async def _run_coro(coro, i):
        try:
            return await asyncio.wait_for(coro, timeout=timeout), i
        except Exception as e:
            if not return_exceptions:
                raise
            return e, i
        finally:
            callback.relative_update(1)

    i = 0
    n = len(coros)
    results = [None] * n
    pending = set()

    while pending or i < n:
        while len(pending) < batch_size and i < n:
            pending.add(asyncio.ensure_future(_run_coro(coros[i], i)))
            i += 1

        if not pending:
            break

        done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
        while done:
            result, k = await done.pop()
            results[k] = result

    return results
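
# Hedged sketch of the batching behaviour: with batch_size=2, at most two of
# the coroutines below are in flight at once, and results keep input order
# (``fetch`` and ``urls`` are hypothetical):
#
#     coros = [fetch(url) for url in urls]
#     results = await _run_coros_in_chunks(coros, batch_size=2,
#                                          return_exceptions=True)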

# these methods should be implemented as async by any async-able backend
async_methods = [
    "_ls",
    "_cat_file",
    "_get_file",
    "_put_file",
    "_rm_file",
    "_cp_file",
    "_pipe_file",
    "_expand_path",
    "_info",
    "_isfile",
    "_isdir",
    "_exists",
    "_walk",
    "_glob",
    "_find",
    "_du",
    "_size",
    "_mkdir",
    "_makedirs",
]


class AsyncFileSystem(AbstractFileSystem):
    """Async file operations, default implementations

    Passes bulk operations to asyncio.gather for concurrent operation.

    Implementations that have concurrent batch operations and/or async methods
    should inherit from this class instead of AbstractFileSystem. Docstrings are
    copied from the un-underscored method in AbstractFileSystem, if not given.
    """

    # note that methods do not have docstring here; they will be copied
    # for _* methods and inferred for overridden methods.

    async_impl = True
    mirror_sync_methods = True
    disable_throttling = False

    def __init__(self, *args, asynchronous=False, loop=None, batch_size=None, **kwargs):
        self.asynchronous = asynchronous
        self._pid = os.getpid()
        if not asynchronous:
            self._loop = loop or get_loop()
        else:
            self._loop = None
        self.batch_size = batch_size
        super().__init__(*args, **kwargs)

    @property
    def loop(self):
        if self._pid != os.getpid():
            raise RuntimeError("This class is not fork-safe")
        return self._loop

    async def _rm_file(self, path, **kwargs):
        raise NotImplementedError

    async def _rm(self, path, recursive=False, batch_size=None, **kwargs):
        # TODO: implement on_error
        batch_size = batch_size or self.batch_size
        path = await self._expand_path(path, recursive=recursive)
        return await _run_coros_in_chunks(
            [self._rm_file(p, **kwargs) for p in reversed(path)],
            batch_size=batch_size,
            nofiles=True,
        )

    async def _cp_file(self, path1, path2, **kwargs):
        raise NotImplementedError

    async def _mv_file(self, path1, path2):
        await self._cp_file(path1, path2)
        await self._rm_file(path1)

    async def _copy(
        self,
        path1,
        path2,
        recursive=False,
        on_error=None,
        maxdepth=None,
        batch_size=None,
        **kwargs,
    ):
        if on_error is None and recursive:
            on_error = "ignore"
        elif on_error is None:
            on_error = "raise"

        if isinstance(path1, list) and isinstance(path2, list):
            # No need to expand paths when both source and destination
            # are provided as lists
            paths1 = path1
            paths2 = path2
        else:
            source_is_str = isinstance(path1, str)
            paths1 = await self._expand_path(
                path1, maxdepth=maxdepth, recursive=recursive
            )
            if source_is_str and (not recursive or maxdepth is not None):
                # Non-recursive glob does not copy directories
                paths1 = [
                    p for p in paths1 if not (trailing_sep(p) or await self._isdir(p))
                ]
                if not paths1:
                    return

            source_is_file = len(paths1) == 1
            dest_is_dir = isinstance(path2, str) and (
                trailing_sep(path2) or await self._isdir(path2)
            )

            exists = source_is_str and (
                (has_magic(path1) and source_is_file)
                or (not has_magic(path1) and dest_is_dir and not trailing_sep(path1))
            )
            paths2 = other_paths(
                paths1,
                path2,
                exists=exists,
                flatten=not source_is_str,
            )

        batch_size = batch_size or self.batch_size
        coros = [self._cp_file(p1, p2, **kwargs) for p1, p2 in zip(paths1, paths2)]
        result = await _run_coros_in_chunks(
            coros, batch_size=batch_size, return_exceptions=True, nofiles=True
        )

        for ex in filter(is_exception, result):
            if on_error == "ignore" and isinstance(ex, FileNotFoundError):
                continue
            raise ex

    async def _pipe_file(self, path, value, mode="overwrite", **kwargs):
        raise NotImplementedError

    async def _pipe(self, path, value=None, batch_size=None, **kwargs):
        if isinstance(path, str):
            path = {path: value}
        batch_size = batch_size or self.batch_size
        return await _run_coros_in_chunks(
            [self._pipe_file(k, v, **kwargs) for k, v in path.items()],
            batch_size=batch_size,
            nofiles=True,
        )

    async def _process_limits(self, url, start, end):
        """Helper for "Range"-based _cat_file"""
        size = None
        suff = False
        if start is not None and start < 0:
            # if start is negative and end None, end is the "suffix length"
            if end is None:
                end = -start
                start = ""
                suff = True
            else:
                size = size or (await self._info(url))["size"]
                start = size + start
        elif start is None:
            start = 0
        if not suff:
            if end is not None and end < 0:
                if start is not None:
                    size = size or (await self._info(url))["size"]
                    end = size + end
            elif end is None:
                end = ""
            if isinstance(end, numbers.Integral):
                end -= 1  # bytes range is inclusive
        return f"bytes={start}-{end}"
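
    # Hedged examples of the header strings this helper produces (HTTP Range
    # semantics: inclusive end, suffix form for a negative start):
    #
    #     start=0,   end=100  ->  "bytes=0-99"  (input end is exclusive)
    #     start=-10, end=None ->  "bytes=-10"   (last 10 bytes of the file)
    #     start=5,   end=None ->  "bytes=5-"    (from offset 5 to EOF)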

    async def _cat_file(self, path, start=None, end=None, **kwargs):
        raise NotImplementedError

    async def _cat(
        self, path, recursive=False, on_error="raise", batch_size=None, **kwargs
    ):
        paths = await self._expand_path(path, recursive=recursive)
        coros = [self._cat_file(path, **kwargs) for path in paths]
        batch_size = batch_size or self.batch_size
        out = await _run_coros_in_chunks(
            coros, batch_size=batch_size, nofiles=True, return_exceptions=True
        )
        if on_error == "raise":
            ex = next(filter(is_exception, out), False)
            if ex:
                raise ex
        if (
            len(paths) > 1
            or isinstance(path, list)
            or paths[0] != self._strip_protocol(path)
        ):
            return {
                k: v
                for k, v in zip(paths, out)
                if on_error != "omit" or not is_exception(v)
            }
        else:
            return out[0]
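
    # Hedged note on the return shape: a single concrete path yields raw
    # bytes, while a list or glob pattern yields a dict keyed by expanded path
    # (``fs`` stands for a concrete async implementation; paths illustrative):
    #
    #     await fs._cat("bucket/a")      # -> b"..."
    #     await fs._cat("bucket/*.csv")  # -> {"bucket/x.csv": b"...", ...}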

    async def _cat_ranges(
        self,
        paths,
        starts,
        ends,
        max_gap=None,
        batch_size=None,
        on_error="return",
        **kwargs,
    ):
        """Get the contents of byte ranges from one or more files

        Parameters
        ----------
        paths: list
            A list of filepaths on this filesystem
        starts, ends: int or list
            Byte limits of the read. If using a single int, the same value will be
            used to read all the specified files.
        """
        # TODO: on_error
        if max_gap is not None:
            # use utils.merge_offset_ranges
            raise NotImplementedError
        if not isinstance(paths, list):
            raise TypeError
        if not isinstance(starts, Iterable):
            starts = [starts] * len(paths)
        if not isinstance(ends, Iterable):
            ends = [ends] * len(paths)
        if len(starts) != len(paths) or len(ends) != len(paths):
            raise ValueError
        coros = [
            self._cat_file(p, start=s, end=e, **kwargs)
            for p, s, e in zip(paths, starts, ends)
        ]
        batch_size = batch_size or self.batch_size
        return await _run_coros_in_chunks(
            coros, batch_size=batch_size, nofiles=True, return_exceptions=True
        )

    async def _put_file(self, lpath, rpath, mode="overwrite", **kwargs):
        raise NotImplementedError

    async def _put(
        self,
        lpath,
        rpath,
        recursive=False,
        callback=DEFAULT_CALLBACK,
        batch_size=None,
        maxdepth=None,
        **kwargs,
    ):
        """Copy file(s) from local.

        Copies a specific file or tree of files (if recursive=True). If rpath
        ends with a "/", it will be assumed to be a directory, and target files
        will go within.

        The put_file method will be called concurrently on a batch of files. The
        batch_size option can configure the number of futures that can be executed
        at the same time. If it is -1, then all the files will be uploaded concurrently.
        The default can be set for this instance by passing "batch_size" in the
        constructor, or for all instances by setting the "gather_batch_size" key
        in ``fsspec.config.conf``, falling back to 1/8th of the system limit.
        """
        if isinstance(lpath, list) and isinstance(rpath, list):
            # No need to expand paths when both source and destination
            # are provided as lists
            rpaths = rpath
            lpaths = lpath
        else:
            source_is_str = isinstance(lpath, str)
            if source_is_str:
                lpath = make_path_posix(lpath)
            fs = LocalFileSystem()
            lpaths = fs.expand_path(lpath, recursive=recursive, maxdepth=maxdepth)
            if source_is_str and (not recursive or maxdepth is not None):
                # Non-recursive glob does not copy directories
                lpaths = [p for p in lpaths if not (trailing_sep(p) or fs.isdir(p))]
                if not lpaths:
                    return

            source_is_file = len(lpaths) == 1
            dest_is_dir = isinstance(rpath, str) and (
                trailing_sep(rpath) or await self._isdir(rpath)
            )

            rpath = self._strip_protocol(rpath)
            exists = source_is_str and (
                (has_magic(lpath) and source_is_file)
                or (not has_magic(lpath) and dest_is_dir and not trailing_sep(lpath))
            )
            rpaths = other_paths(
                lpaths,
                rpath,
                exists=exists,
                flatten=not source_is_str,
            )

        is_dir = {l: os.path.isdir(l) for l in lpaths}
        rdirs = [r for l, r in zip(lpaths, rpaths) if is_dir[l]]
        file_pairs = [(l, r) for l, r in zip(lpaths, rpaths) if not is_dir[l]]

        await asyncio.gather(*[self._makedirs(d, exist_ok=True) for d in rdirs])
        batch_size = batch_size or self.batch_size

        coros = []
        callback.set_size(len(file_pairs))
        for lfile, rfile in file_pairs:
            put_file = callback.branch_coro(self._put_file)
            coros.append(put_file(lfile, rfile, **kwargs))

        return await _run_coros_in_chunks(
            coros, batch_size=batch_size, callback=callback
        )
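
    # Hedged usage sketch: upload a local tree with bounded concurrency
    # (the paths here are illustrative):
    #
    #     await fs._put("/tmp/data/", "bucket/data/", recursive=True,
    #                   batch_size=16)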

    async def _get_file(self, rpath, lpath, **kwargs):
        raise NotImplementedError

    async def _get(
        self,
        rpath,
        lpath,
        recursive=False,
        callback=DEFAULT_CALLBACK,
        maxdepth=None,
        **kwargs,
    ):
        """Copy file(s) to local.

        Copies a specific file or tree of files (if recursive=True). If lpath
        ends with a "/", it will be assumed to be a directory, and target files
        will go within. Can submit a list of paths, which may be glob-patterns
        and will be expanded.

        The get_file method will be called concurrently on a batch of files. The
        batch_size option can configure the number of futures that can be executed
        at the same time. If it is -1, then all the files will be downloaded concurrently.
        The default can be set for this instance by passing "batch_size" in the
        constructor, or for all instances by setting the "gather_batch_size" key
        in ``fsspec.config.conf``, falling back to 1/8th of the system limit.
        """
        if isinstance(lpath, list) and isinstance(rpath, list):
            # No need to expand paths when both source and destination
            # are provided as lists
            rpaths = rpath
            lpaths = lpath
        else:
            source_is_str = isinstance(rpath, str)
            # First check for rpath trailing slash as _strip_protocol removes it.
            source_not_trailing_sep = source_is_str and not trailing_sep(rpath)
            rpath = self._strip_protocol(rpath)
            rpaths = await self._expand_path(
                rpath, recursive=recursive, maxdepth=maxdepth
            )
            if source_is_str and (not recursive or maxdepth is not None):
                # Non-recursive glob does not copy directories
                rpaths = [
                    p for p in rpaths if not (trailing_sep(p) or await self._isdir(p))
                ]
                if not rpaths:
                    return

            lpath = make_path_posix(lpath)
            source_is_file = len(rpaths) == 1
            dest_is_dir = isinstance(lpath, str) and (
                trailing_sep(lpath) or LocalFileSystem().isdir(lpath)
            )

            exists = source_is_str and (
                (has_magic(rpath) and source_is_file)
                or (not has_magic(rpath) and dest_is_dir and source_not_trailing_sep)
            )
            lpaths = other_paths(
                rpaths,
                lpath,
                exists=exists,
                flatten=not source_is_str,
            )

        [os.makedirs(os.path.dirname(lp), exist_ok=True) for lp in lpaths]
        batch_size = kwargs.pop("batch_size", self.batch_size)

        coros = []
        callback.set_size(len(lpaths))
        for lpath, rpath in zip(lpaths, rpaths):
            get_file = callback.branch_coro(self._get_file)
            coros.append(get_file(rpath, lpath, **kwargs))

        return await _run_coros_in_chunks(
            coros, batch_size=batch_size, callback=callback
        )

    async def _isfile(self, path):
        try:
            return (await self._info(path))["type"] == "file"
        except:  # noqa: E722
            return False

    async def _isdir(self, path):
        try:
            return (await self._info(path))["type"] == "directory"
        except OSError:
            return False

    async def _size(self, path):
        return (await self._info(path)).get("size", None)

    async def _sizes(self, paths, batch_size=None):
        batch_size = batch_size or self.batch_size
        return await _run_coros_in_chunks(
            [self._size(p) for p in paths], batch_size=batch_size
        )

    async def _exists(self, path, **kwargs):
        try:
            await self._info(path, **kwargs)
            return True
        except FileNotFoundError:
            return False

    async def _info(self, path, **kwargs):
        raise NotImplementedError

    async def _ls(self, path, detail=True, **kwargs):
        raise NotImplementedError

    async def _walk(self, path, maxdepth=None, on_error="omit", **kwargs):
        if maxdepth is not None and maxdepth < 1:
            raise ValueError("maxdepth must be at least 1")

        path = self._strip_protocol(path)
        full_dirs = {}
        dirs = {}
        files = {}

        detail = kwargs.pop("detail", False)
        try:
            listing = await self._ls(path, detail=True, **kwargs)
        except (FileNotFoundError, OSError) as e:
            if on_error == "raise":
                raise
            elif callable(on_error):
                on_error(e)
            if detail:
                yield path, {}, {}
            else:
                yield path, [], []
            return

        for info in listing:
            # each info name must be at least [path]/part, but here
            # we check also for names like [path]/part/
            pathname = info["name"].rstrip("/")
            name = pathname.rsplit("/", 1)[-1]
            if info["type"] == "directory" and pathname != path:
                # do not include "self" path
                full_dirs[name] = pathname
                dirs[name] = info
            elif pathname == path:
                # file-like with same name as given path
                files[""] = info
            else:
                files[name] = info

        if detail:
            yield path, dirs, files
        else:
            yield path, list(dirs), list(files)

        if maxdepth is not None:
            maxdepth -= 1
            if maxdepth < 1:
                return

        for d in dirs:
            async for _ in self._walk(
                full_dirs[d], maxdepth=maxdepth, detail=detail, **kwargs
            ):
                yield _

    async def _glob(self, path, maxdepth=None, **kwargs):
        if maxdepth is not None and maxdepth < 1:
            raise ValueError("maxdepth must be at least 1")

        import re

        seps = (os.path.sep, os.path.altsep) if os.path.altsep else (os.path.sep,)
        ends_with_sep = path.endswith(seps)  # _strip_protocol strips trailing slash
        path = self._strip_protocol(path)
        append_slash_to_dirname = ends_with_sep or path.endswith(
            tuple(sep + "**" for sep in seps)
        )
        idx_star = path.find("*") if path.find("*") >= 0 else len(path)
        idx_qmark = path.find("?") if path.find("?") >= 0 else len(path)
        idx_brace = path.find("[") if path.find("[") >= 0 else len(path)

        min_idx = min(idx_star, idx_qmark, idx_brace)

        detail = kwargs.pop("detail", False)

        if not has_magic(path):
            if await self._exists(path, **kwargs):
                if not detail:
                    return [path]
                else:
                    return {path: await self._info(path, **kwargs)}
            else:
                if not detail:
                    return []  # glob of non-existent returns empty
                else:
                    return {}
        elif "/" in path[:min_idx]:
            min_idx = path[:min_idx].rindex("/")
            root = path[: min_idx + 1]
            depth = path[min_idx + 1 :].count("/") + 1
        else:
            root = ""
            depth = path[min_idx + 1 :].count("/") + 1

        if "**" in path:
            if maxdepth is not None:
                idx_double_stars = path.find("**")
                depth_double_stars = path[idx_double_stars:].count("/") + 1
                depth = depth - depth_double_stars + maxdepth
            else:
                depth = None

        allpaths = await self._find(
            root, maxdepth=depth, withdirs=True, detail=True, **kwargs
        )

        pattern = glob_translate(path + ("/" if ends_with_sep else ""))
        pattern = re.compile(pattern)

        out = {
            p: info
            for p, info in sorted(allpaths.items())
            if pattern.match(
                p + "/"
                if append_slash_to_dirname and info["type"] == "directory"
                else p
            )
        }

        if detail:
            return out
        else:
            return list(out)

    async def _du(self, path, total=True, maxdepth=None, **kwargs):
        sizes = {}
        # async for?
        for f in await self._find(path, maxdepth=maxdepth, **kwargs):
            info = await self._info(f)
            sizes[info["name"]] = info["size"]
        if total:
            return sum(sizes.values())
        else:
            return sizes

    async def _find(self, path, maxdepth=None, withdirs=False, **kwargs):
        path = self._strip_protocol(path)
        out = {}
        detail = kwargs.pop("detail", False)

        # Add the root directory if withdirs is requested
        # This is needed for posix glob compliance
        if withdirs and path != "" and await self._isdir(path):
            out[path] = await self._info(path)

        # async for?
        async for _, dirs, files in self._walk(path, maxdepth, detail=True, **kwargs):
            if withdirs:
                files.update(dirs)
            out.update({info["name"]: info for name, info in files.items()})
        if not out and (await self._isfile(path)):
            # walk works on directories, but find should also return [path]
            # when path happens to be a file
            out[path] = {}
        names = sorted(out)
        if not detail:
            return names
        else:
            return {name: out[name] for name in names}

    async def _expand_path(self, path, recursive=False, maxdepth=None):
        if maxdepth is not None and maxdepth < 1:
            raise ValueError("maxdepth must be at least 1")

        if isinstance(path, str):
            out = await self._expand_path([path], recursive, maxdepth)
        else:
            out = set()
            path = [self._strip_protocol(p) for p in path]
            for p in path:  # can gather here
                if has_magic(p):
                    bit = set(await self._glob(p, maxdepth=maxdepth))
                    out |= bit
                    if recursive:
                        # glob call above expanded one depth so if maxdepth is defined
                        # then decrement it in expand_path call below. If it is zero
                        # after decrementing then avoid expand_path call.
                        if maxdepth is not None and maxdepth <= 1:
                            continue
                        out |= set(
                            await self._expand_path(
                                list(bit),
                                recursive=recursive,
                                maxdepth=maxdepth - 1 if maxdepth is not None else None,
                            )
                        )
                    continue
                elif recursive:
                    rec = set(await self._find(p, maxdepth=maxdepth, withdirs=True))
                    out |= rec
                if p not in out and (recursive is False or (await self._exists(p))):
                    # should only check once, for the root
                    out.add(p)
        if not out:
            raise FileNotFoundError(path)
        return sorted(out)

    async def _mkdir(self, path, create_parents=True, **kwargs):
        pass  # not necessary to implement, may not have directories

    async def _makedirs(self, path, exist_ok=False):
        pass  # not necessary to implement, may not have directories

    async def open_async(self, path, mode="rb", **kwargs):
        if "b" not in mode or kwargs.get("compression"):
            raise ValueError
        raise NotImplementedError
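

# A hedged sketch of a minimal backend: subclass AsyncFileSystem and implement
# the underscored coroutines; blocking equivalents (ls, cat, ...) are then
# attached by mirror_sync_methods below. The class name and its storage calls
# are illustrative only:
#
#     class MyAsyncFS(AsyncFileSystem):
#         async def _ls(self, path, detail=True, **kwargs):
#             ...  # query the remote store here
#
#         async def _cat_file(self, path, start=None, end=None, **kwargs):
#             ...  # return bytes for the requested range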


def mirror_sync_methods(obj):
    """Populate sync and async methods for obj

    For each method, this will create a sync version if the name refers to an
    async method (coroutine) and there is no override in the child class; it
    will create an async method for the corresponding sync method if there is
    no implementation.

    Uses the methods specified in
    - async_methods: the set that an implementation is expected to provide
    - default_async_methods: that can be derived from their sync version in
      AbstractFileSystem
    - AsyncFileSystem: async-specific default coroutines
    """
    from fsspec import AbstractFileSystem

    for method in async_methods + dir(AsyncFileSystem):
        if not method.startswith("_"):
            continue
        smethod = method[1:]
        if private.match(method):
            isco = inspect.iscoroutinefunction(getattr(obj, method, None))
            unsync = getattr(getattr(obj, smethod, False), "__func__", None)
            is_default = unsync is getattr(AbstractFileSystem, smethod, "")
            if isco and is_default:
                mth = sync_wrapper(getattr(obj, method), obj=obj)
                setattr(obj, smethod, mth)
                if not mth.__doc__:
                    mth.__doc__ = getattr(
                        getattr(AbstractFileSystem, smethod, None), "__doc__", ""
                    )
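
# Hedged example: given an instance with coroutine methods, attach blocking
# mirrors in place (``fs`` stands for any AsyncFileSystem instance):
#
#     mirror_sync_methods(fs)
#     fs.ls("/data")  # runs fs._ls on the IO loop and blocks for the result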


class FSSpecCoroutineCancel(Exception):
    pass


def _dump_running_tasks(
    printout=True, cancel=True, exc=FSSpecCoroutineCancel, with_task=False
):
    import traceback

    tasks = [t for t in asyncio.tasks.all_tasks(loop[0]) if not t.done()]
    if printout:
        [task.print_stack() for task in tasks]
    out = [
        {
            "locals": task._coro.cr_frame.f_locals,
            "file": task._coro.cr_frame.f_code.co_filename,
            "firstline": task._coro.cr_frame.f_code.co_firstlineno,
            "linelo": task._coro.cr_frame.f_lineno,
            "stack": traceback.format_stack(task._coro.cr_frame),
            "task": task if with_task else None,
        }
        for task in tasks
    ]
    if cancel:
        for t in tasks:
            cbs = t._callbacks
            t.cancel()
            asyncio.futures.Future.set_exception(t, exc)
            asyncio.futures.Future.cancel(t)
            [cb[0](t) for cb in cbs]  # cancels any dependent concurrent.futures
            try:
                t._coro.throw(exc)  # exits coro, unless explicitly handled
            except exc:
                pass
    return out


class AbstractAsyncStreamedFile(AbstractBufferedFile):
    # no read buffering, and always auto-commit
    # TODO: readahead might still be useful here, but needs async version

    async def read(self, length=-1):
        """
        Return data from cache, or fetch pieces as necessary

        Parameters
        ----------
        length: int (-1)
            Number of bytes to read; if <0, all remaining bytes.
        """
        length = -1 if length is None else int(length)
        if self.mode != "rb":
            raise ValueError("File not in read mode")
        if length < 0:
            length = self.size - self.loc
        if self.closed:
            raise ValueError("I/O operation on closed file.")
        if length == 0:
            # don't even bother calling fetch
            return b""
        out = await self._fetch_range(self.loc, self.loc + length)
        self.loc += len(out)
        return out

    async def write(self, data):
        """
        Write data to buffer.

        Buffer only sent on flush() or if buffer is greater than
        or equal to blocksize.

        Parameters
        ----------
        data: bytes
            Set of bytes to be written.
        """
        if self.mode not in {"wb", "ab"}:
            raise ValueError("File not in write mode")
        if self.closed:
            raise ValueError("I/O operation on closed file.")
        if self.forced:
            raise ValueError("This file has been force-flushed, can only close")
        out = self.buffer.write(data)
        self.loc += out
        if self.buffer.tell() >= self.blocksize:
            await self.flush()
        return out

    async def close(self):
        """Close file

        Finalizes writes, discards cache
        """
        if getattr(self, "_unclosable", False):
            return
        if self.closed:
            return
        if self.mode == "rb":
            self.cache = None
        else:
            if not self.forced:
                await self.flush(force=True)

            if self.fs is not None:
                self.fs.invalidate_cache(self.path)
                self.fs.invalidate_cache(self.fs._parent(self.path))

        self.closed = True

    async def flush(self, force=False):
        if self.closed:
            raise ValueError("Flush on closed file")
        if force and self.forced:
            raise ValueError("Force flush cannot be called more than once")
        if force:
            self.forced = True

        if self.mode not in {"wb", "ab"}:
            # no-op to flush on read-mode
            return

        if not force and self.buffer.tell() < self.blocksize:
            # Defer write on small block
            return

        if self.offset is None:
            # Initialize a multipart upload
            self.offset = 0
            try:
                await self._initiate_upload()
            except:
                self.closed = True
                raise

        if await self._upload_chunk(final=force) is not False:
            self.offset += self.buffer.seek(0, 2)
            self.buffer = io.BytesIO()

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.close()

    async def _fetch_range(self, start, end):
        raise NotImplementedError

    async def _initiate_upload(self):
        pass

    async def _upload_chunk(self, final=False):
        raise NotImplementedError
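

# Hedged usage sketch: a concrete subclass providing _fetch_range and
# _upload_chunk can be used as an async context manager. ``afile`` is
# illustrative, e.g. as returned by a backend's open_async:
#
#     async with afile as f:
#         data = await f.read(1024)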