Compare commits
222 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
bd696fb4db | ||
|
|
23b400f0fb | ||
|
|
a1a385196b | ||
|
|
a0046439d1 | ||
|
|
63a8476575 | ||
|
|
e96f4f3cc0 | ||
|
|
cd5d0dfc3b | ||
|
|
ece41cdb06 | ||
|
|
c362d54b1a | ||
|
|
543e4fe582 | ||
|
|
53fca7016e | ||
|
|
4b6529c3a5 | ||
|
|
8957147926 | ||
|
|
4c7392ed17 | ||
|
|
508de4aefb | ||
|
|
3d39992280 | ||
|
|
b462836540 | ||
|
|
2795660c28 | ||
|
|
3dc87753ea | ||
|
|
645cc19037 | ||
|
|
86d6acccf7 | ||
|
|
0f5db35b6e | ||
|
|
7c6742a60a | ||
|
|
4fa32a98ed | ||
|
|
36ace8017d | ||
|
|
35f4071c0b | ||
|
|
ead0bce96e | ||
|
|
86b00091a9 | ||
|
|
2a2928b4a0 | ||
|
|
d47133e5bf | ||
|
|
8f0c58dafa | ||
|
|
27a721c7dd | ||
|
|
3e964d5319 | ||
|
|
84a31ae0b4 | ||
|
|
891b533ec2 | ||
|
|
b9fb3e4979 | ||
|
|
09342982a4 | ||
|
|
da8c49011e | ||
|
|
b805f0a010 | ||
|
|
5b0feb3d20 | ||
|
|
ecbfc2957d | ||
|
|
e96248dec7 | ||
|
|
7d0045f03c | ||
|
|
c3d7449844 | ||
|
|
a4f11b0243 | ||
|
|
1702fd0797 | ||
|
|
464b3333d7 | ||
|
|
685f2ec351 | ||
|
|
aad46baf28 | ||
|
|
a97db0aac6 | ||
|
|
af5f96f97e | ||
|
|
a4bdaea888 | ||
|
|
ac5a9d2793 | ||
|
|
b17ad32a47 | ||
|
|
af39855e28 | ||
|
|
ac6e047f0d | ||
|
|
1e24337f29 | ||
|
|
2cc82ae12f | ||
|
|
d11f385049 | ||
|
|
7377611e1f | ||
|
|
f3b5fb85c9 | ||
|
|
a4faef83ac | ||
|
|
e1184f745b | ||
|
|
fe630008e9 | ||
|
|
b43072cf0a | ||
|
|
ad3f4cabc9 | ||
|
|
f70567872c | ||
|
|
a8ade72113 | ||
|
|
1dcc8c9c8b | ||
|
|
dd5462b307 | ||
|
|
e39c03e8e3 | ||
|
|
9297504b84 | ||
|
|
3971115ab9 | ||
|
|
b5badbbc61 | ||
|
|
ba8621f5be | ||
|
|
8e5c94a4eb | ||
|
|
66a1c1a174 | ||
|
|
ab6214df79 | ||
|
|
dc5d42c8e2 | ||
|
|
ab75495cd3 | ||
|
|
39c1ecc7a2 | ||
|
|
af7720668c | ||
|
|
9051fba601 | ||
|
|
03ea6acec0 | ||
|
|
ddc93033d7 | ||
|
|
eb6d830e2a | ||
|
|
2b0ae5a6c1 | ||
|
|
1636839b67 | ||
|
|
65b97d024b | ||
|
|
fb77d35863 | ||
|
|
197d2968e3 | ||
|
|
7460dbea17 | ||
|
|
b49fd797fc | ||
|
|
6bd8fb2290 | ||
|
|
cd1e16ad9e | ||
|
|
3fd9ea2204 | ||
|
|
d5a1266cbe | ||
|
|
89bbe8ad4c | ||
|
|
e44c6f8b0e | ||
|
|
0ba810fda5 | ||
|
|
677bbfed8b | ||
|
|
97671b78dd | ||
|
|
2865c0df9f | ||
|
|
0c037ddd12 | ||
|
|
3ee4674098 | ||
|
|
5d9a91da4a | ||
|
|
47c579ff22 | ||
|
|
caf5cba801 | ||
|
|
486c7894ce | ||
|
|
db8a00edae | ||
|
|
805523b924 | ||
|
|
32ab2e648d | ||
|
|
4d91ae740a | ||
|
|
05ec4b4312 | ||
|
|
cd2c1ada14 | ||
|
|
4ca9642403 | ||
|
|
b438f29a71 | ||
|
|
4db178b8d6 | ||
|
|
38637a9821 | ||
|
|
de8c0bdcdd | ||
|
|
fd35975cf9 | ||
|
|
1495834a09 | ||
|
|
2a7588dead | ||
|
|
051de748b9 | ||
|
|
97adb30d41 | ||
|
|
eacfbc45c0 | ||
|
|
8deddd7ee9 | ||
|
|
beb494b67e | ||
|
|
e2948a8b48 | ||
|
|
ff3d3f2f78 | ||
|
|
85783da515 | ||
|
|
9dbfad4294 | ||
|
|
5e54b92efd | ||
|
|
cffdbd47b5 | ||
|
|
f398f46509 | ||
|
|
097c76aa46 | ||
|
|
a9da16e048 | ||
|
|
8e5f0984af | ||
|
|
38b738e766 | ||
|
|
55772d30a4 | ||
|
|
382285cfd9 | ||
|
|
d2e5ce284e | ||
|
|
88d21d742d | ||
|
|
7a0356d47b | ||
|
|
a13bf0cd6c | ||
|
|
7cacf233ce | ||
|
|
b48212e420 | ||
|
|
f10f9970b2 | ||
|
|
96ff214107 | ||
|
|
e7bf3607e8 | ||
|
|
2883362a0e | ||
|
|
df7af507f0 | ||
|
|
46dbed3e7e | ||
|
|
46d5579caa | ||
|
|
11a3a9d457 | ||
|
|
6dca1eea96 | ||
|
|
85ef53c6bb | ||
|
|
4c41ba0a50 | ||
|
|
6f60a78262 | ||
|
|
8f083818c3 | ||
|
|
791c845d23 | ||
|
|
aeea23229f | ||
|
|
e91ffe4844 | ||
|
|
df9d456cf6 | ||
|
|
4c89e8aefa | ||
|
|
7ddce7f80b | ||
|
|
b3dbbc46e2 | ||
|
|
762d446661 | ||
|
|
1aa3c3d9a8 | ||
|
|
0f6ad3290d | ||
|
|
e26a4c5a1c | ||
|
|
d963e3fe1c | ||
|
|
0cd54486d0 | ||
|
|
f93c3d76ce | ||
|
|
8bf7576b33 | ||
|
|
2eae33bd33 | ||
|
|
5e333f53ee | ||
|
|
9c249a100f | ||
|
|
0ee3d6d72d | ||
|
|
3c218b81a3 | ||
|
|
80dc0e8488 | ||
|
|
4064ec53c3 | ||
|
|
37ac9bd1d5 | ||
|
|
e234b8c5ab | ||
|
|
efc56c43c2 | ||
|
|
50de008820 | ||
|
|
d7baa0713e | ||
|
|
3b19ef12bc | ||
|
|
dfb15e6778 | ||
|
|
379b87d925 | ||
|
|
b856041b53 | ||
|
|
b3b36c5cc9 | ||
|
|
ab1476c0d1 | ||
|
|
566a38b432 | ||
|
|
96e4532a9d | ||
|
|
bd3569367e | ||
|
|
20680b0e38 | ||
|
|
44e8d2b46b | ||
|
|
80c659be71 | ||
|
|
06640102f8 | ||
|
|
a0c47c9944 | ||
|
|
744e70ffff | ||
|
|
e6211896f7 | ||
|
|
ca27d070f0 | ||
|
|
82b99b5c9e | ||
|
|
c5c1567321 | ||
|
|
1adc908788 | ||
|
|
2970d9460f | ||
|
|
f0b5db234a | ||
|
|
c6f760e61c | ||
|
|
6067d2a590 | ||
|
|
c56e3496db | ||
|
|
55fd565ef0 | ||
|
|
231d985c82 | ||
|
|
0f433d7e5d | ||
|
|
e188c7e41f | ||
|
|
fa7d743826 | ||
|
|
f4b3348c8e | ||
|
|
724118a671 | ||
|
|
ea95d718ee | ||
|
|
9ba3d5a1ea | ||
|
|
1ed4b4b65d |
35
.github/ISSUE_TEMPLATE/bug_report.md
vendored
35
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@@ -7,14 +7,37 @@ assignees: ''
|
||||
|
||||
---
|
||||
|
||||
*** READ BEFORE POSTING ***
|
||||
# IMPORTANT
|
||||
|
||||
Before posting an issue - please upgrade to the latest version and confirm the issue/bug is still there.
|
||||
If you want help, you got to read this first, follow the instructions.
|
||||
|
||||
### Are you up-to-date?
|
||||
|
||||
Upgrade to the latest version and confirm the issue/bug is still there.
|
||||
|
||||
Upgrade using:
|
||||
`$ pip install yfinance --upgrade --no-cache-dir`
|
||||
|
||||
Bug still there? Delete this content and submit your bug report here and provide the following, as best you can:
|
||||
Confirm by running:
|
||||
|
||||
- Simple code that reproduces your problem
|
||||
- The error message
|
||||
`import yfinance as yf ; print(yf.__version__)`
|
||||
|
||||
and comparing against [PIP](https://pypi.org/project/yfinance/#history).
|
||||
|
||||
### Does Yahoo actually have the data?
|
||||
|
||||
Are you spelling ticker *exactly* same as Yahoo?
|
||||
|
||||
Then visit `finance.yahoo.com` and confirm they have the data you want. Maybe your ticker was delisted, or your expectations of `yfinance` are wrong.
|
||||
|
||||
### Are you spamming Yahoo?
|
||||
|
||||
Yahoo Finance free service has rate-limiting depending on request type - roughly 60/minute for prices, 10/minute for info. Once limit hit, Yahoo can delay, block, or return bad data. Not a `yfinance` bug.
|
||||
|
||||
### Still think it's a bug?
|
||||
|
||||
Delete this default message (all of it) and submit your bug report here, providing the following as best you can:
|
||||
|
||||
- Simple code that reproduces your problem, that we can copy-paste-run
|
||||
- Exception message with full traceback, or proof `yfinance` returning bad data
|
||||
- `yfinance` version and Python version
|
||||
- Operating system type
|
||||
|
||||
14
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
14
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
---
|
||||
name: Feature request
|
||||
about: Request a new feature
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Describe the problem**
|
||||
|
||||
**Describe the solution**
|
||||
|
||||
**Additional context**
|
||||
@@ -1,6 +1,75 @@
|
||||
Change Log
|
||||
===========
|
||||
|
||||
0.2.11
|
||||
------
|
||||
Fix history_metadata accesses for unusual symbols #1411
|
||||
|
||||
0.2.10
|
||||
------
|
||||
General
|
||||
- allow using sqlite3 < 3.8.2 #1380
|
||||
- add another backup decrypt option #1379
|
||||
Prices
|
||||
- restore original download() timezone handling #1385
|
||||
- fix & improve price repair #1289 2a2928b 86d6acc
|
||||
- drop intraday intervals if in post-market but prepost=False #1311
|
||||
Info
|
||||
- fast_info improvements:
|
||||
- add camelCase keys, add dict functions values() & items() #1368
|
||||
- fix fast_info["previousClose"] #1383
|
||||
- catch TypeError Exception #1397
|
||||
|
||||
0.2.9
|
||||
-----
|
||||
- Fix fast_info bugs #1362
|
||||
|
||||
0.2.7
|
||||
-----
|
||||
- Fix Yahoo decryption, smarter this time #1353
|
||||
- Rename basic_info -> fast_info #1354
|
||||
|
||||
0.2.6
|
||||
-----
|
||||
- Fix Ticker.basic_info lazy-loading #1342
|
||||
|
||||
0.2.5
|
||||
-----
|
||||
- Fix Yahoo data decryption again #1336
|
||||
- New: Ticker.basic_info - faster Ticker.info #1317
|
||||
|
||||
0.2.4
|
||||
-----
|
||||
- Fix Yahoo data decryption #1297
|
||||
- New feature: 'Ticker.get_shares_full()' #1301
|
||||
- Improve caching of financials data #1284
|
||||
- Restore download() original alignment behaviour #1283
|
||||
- Fix the database lock error in multithread download #1276
|
||||
|
||||
0.2.3
|
||||
-----
|
||||
- Make financials API '_' use consistent
|
||||
|
||||
0.2.2
|
||||
-----
|
||||
- Restore 'financials' attribute (map to 'income_stmt')
|
||||
|
||||
0.2.1
|
||||
-----
|
||||
Release!
|
||||
|
||||
0.2.0rc5
|
||||
--------
|
||||
- Improve financials error handling #1243
|
||||
- Fix '100x price' repair #1244
|
||||
|
||||
0.2.0rc4
|
||||
--------
|
||||
- Access to old financials tables via `get_income_stmt(legacy=True)`
|
||||
- Optimise scraping financials & fundamentals, 2x faster
|
||||
- Add 'capital gains' alongside dividends & splits for ETFs, and metadata available via `history_metadata`, plus a bunch of price fixes
|
||||
For full list of changes see #1238
|
||||
|
||||
0.2.0rc2
|
||||
--------
|
||||
Financials
|
||||
|
||||
177
README.md
177
README.md
@@ -42,6 +42,11 @@ Yahoo! finance API is intended for personal use only.**
|
||||
|
||||
---
|
||||
|
||||
## News [2023-01-27]
|
||||
Since December 2022 Yahoo has been encrypting the web data that `yfinance` scrapes for non-market data. Fortunately the decryption keys are available, although Yahoo moved/changed them several times hence `yfinance` breaking several times. `yfinance` is now better prepared for any future changes by Yahoo.
|
||||
|
||||
Why is Yahoo doing this? We don't know. Is it to stop scrapers? Maybe, so we've implemented changes to reduce load on Yahoo. In December we rolled out version 0.2 with optimised scraping. Then in 0.2.6 introduced `Ticker.fast_info`, providing much faster access to some `info` elements wherever possible e.g. price stats and forcing users to switch (sorry but we think necessary). `info` will continue to exist for as long as there are elements without a fast alternative.
|
||||
|
||||
## Quick Start
|
||||
|
||||
### The Ticker module
|
||||
@@ -53,43 +58,44 @@ import yfinance as yf
|
||||
|
||||
msft = yf.Ticker("MSFT")
|
||||
|
||||
# get stock info
|
||||
# get all stock info (slow)
|
||||
msft.info
|
||||
# fast access to subset of stock info (opportunistic)
|
||||
msft.fast_info
|
||||
|
||||
# get historical market data
|
||||
hist = msft.history(period="max")
|
||||
hist = msft.history(period="1mo")
|
||||
|
||||
# show actions (dividends, splits)
|
||||
# show meta information about the history (requires history() to be called first)
|
||||
msft.history_metadata
|
||||
|
||||
# show actions (dividends, splits, capital gains)
|
||||
msft.actions
|
||||
|
||||
# show dividends
|
||||
msft.dividends
|
||||
|
||||
# show splits
|
||||
msft.splits
|
||||
msft.capital_gains # only for mutual funds & etfs
|
||||
|
||||
# show share count
|
||||
# - yearly summary:
|
||||
msft.shares
|
||||
# - accurate time-series count:
|
||||
msft.get_shares_full(start="2022-01-01", end=None)
|
||||
|
||||
# show income statement
|
||||
# show financials:
|
||||
# - income statement
|
||||
msft.income_stmt
|
||||
msft.quarterly_income_stmt
|
||||
|
||||
# show balance sheet
|
||||
# - balance sheet
|
||||
msft.balance_sheet
|
||||
msft.quarterly_balance_sheet
|
||||
|
||||
# show cash flow statement
|
||||
# - cash flow statement
|
||||
msft.cashflow
|
||||
msft.quarterly_cashflow
|
||||
# see `Ticker.get_income_stmt()` for more options
|
||||
|
||||
# show major holders
|
||||
# show holders
|
||||
msft.major_holders
|
||||
|
||||
# show institutional holders
|
||||
msft.institutional_holders
|
||||
|
||||
# show mutualfund holders
|
||||
msft.mutualfund_holders
|
||||
|
||||
# show earnings
|
||||
@@ -104,14 +110,15 @@ msft.recommendations
|
||||
msft.recommendations_summary
|
||||
# show analysts other work
|
||||
msft.analyst_price_target
|
||||
mfst.revenue_forecasts
|
||||
mfst.earnings_forecasts
|
||||
mfst.earnings_trend
|
||||
msft.revenue_forecasts
|
||||
msft.earnings_forecasts
|
||||
msft.earnings_trend
|
||||
|
||||
# show next event (earnings, etc)
|
||||
msft.calendar
|
||||
|
||||
# show all earnings dates
|
||||
# Show future and historic earnings dates, returns at most next 4 quarters and last 8 quarters by default.
|
||||
# Note: If more are needed use msft.get_earnings_dates(limit=XX) with increased limit argument.
|
||||
msft.earnings_dates
|
||||
|
||||
# show ISIN code - *experimental*
|
||||
@@ -140,32 +147,19 @@ msft.history(..., proxy="PROXY_SERVER")
|
||||
msft.get_actions(proxy="PROXY_SERVER")
|
||||
msft.get_dividends(proxy="PROXY_SERVER")
|
||||
msft.get_splits(proxy="PROXY_SERVER")
|
||||
msft.get_capital_gains(proxy="PROXY_SERVER")
|
||||
msft.get_balance_sheet(proxy="PROXY_SERVER")
|
||||
msft.get_cashflow(proxy="PROXY_SERVER")
|
||||
msft.option_chain(..., proxy="PROXY_SERVER")
|
||||
...
|
||||
```
|
||||
|
||||
To use a custom `requests` session (for example to cache calls to the
|
||||
API or customize the `User-agent` header), pass a `session=` argument to
|
||||
the Ticker constructor.
|
||||
|
||||
```python
|
||||
import requests_cache
|
||||
session = requests_cache.CachedSession('yfinance.cache')
|
||||
session.headers['User-agent'] = 'my-program/1.0'
|
||||
ticker = yf.Ticker('msft aapl goog', session=session)
|
||||
# The scraped response will be stored in the cache
|
||||
ticker.actions
|
||||
```
|
||||
|
||||
To initialize multiple `Ticker` objects, use
|
||||
|
||||
```python
|
||||
import yfinance as yf
|
||||
|
||||
tickers = yf.Tickers('msft aapl goog')
|
||||
# ^ returns a named tuple of Ticker objects
|
||||
|
||||
# access each ticker using (example)
|
||||
tickers.tickers['MSFT'].info
|
||||
@@ -180,63 +174,46 @@ import yfinance as yf
|
||||
data = yf.download("SPY AAPL", start="2017-01-01", end="2017-04-30")
|
||||
```
|
||||
|
||||
I've also added some options to make life easier :)
|
||||
`yf.download()` and `Ticker.history()` have many options for configuring fetching and processing, e.g.:
|
||||
|
||||
```python
|
||||
data = yf.download( # or pdr.get_data_yahoo(...
|
||||
# tickers list or string as well
|
||||
tickers = "SPY AAPL MSFT",
|
||||
|
||||
# use "period" instead of start/end
|
||||
# valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
|
||||
# (optional, default is '1mo')
|
||||
period = "ytd",
|
||||
|
||||
# fetch data by interval (including intraday if period < 60 days)
|
||||
# valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
|
||||
# (optional, default is '1d')
|
||||
interval = "1m",
|
||||
|
||||
# Whether to ignore timezone when aligning ticker data from
|
||||
# different timezones. Default is True. False may be useful for
|
||||
# minute/hourly data.
|
||||
ignore_tz = False,
|
||||
|
||||
# group by ticker (to access via data['SPY'])
|
||||
# (optional, default is 'column')
|
||||
group_by = 'ticker',
|
||||
|
||||
# adjust all OHLC automatically
|
||||
# (optional, default is False)
|
||||
auto_adjust = True,
|
||||
|
||||
# identify and attempt repair of currency unit mixups e.g. $/cents
|
||||
repair = False,
|
||||
|
||||
# download pre/post regular market hours data
|
||||
# (optional, default is False)
|
||||
prepost = True,
|
||||
|
||||
# use threads for mass downloading? (True/False/Integer)
|
||||
# (optional, default is True)
|
||||
threads = True,
|
||||
|
||||
# proxy URL scheme use use when downloading?
|
||||
# (optional, default is None)
|
||||
proxy = None
|
||||
)
|
||||
yf.download(tickers = "SPY AAPL", # list of tickers
|
||||
period = "1y", # time period
|
||||
interval = "1d", # trading interval
|
||||
ignore_tz = True, # ignore timezone when aligning data from different exchanges?
|
||||
prepost = False) # download pre/post market hours data?
|
||||
```
|
||||
|
||||
### Timezone cache store
|
||||
Review the [Wiki](https://github.com/ranaroussi/yfinance/wiki) for more options and detail.
|
||||
|
||||
### Smarter scraping
|
||||
|
||||
To use a custom `requests` session (for example to cache calls to the
|
||||
API or customize the `User-agent` header), pass a `session=` argument to
|
||||
the Ticker constructor.
|
||||
|
||||
When fetching price data, all dates are localized to stock exchange timezone.
|
||||
But timezone retrieval is relatively slow, so yfinance attemps to cache them
|
||||
in your users cache folder.
|
||||
You can direct cache to use a different location with `set_tz_cache_location()`:
|
||||
```python
|
||||
import yfinance as yf
|
||||
yf.set_tz_cache_location("custom/cache/location")
|
||||
...
|
||||
import requests_cache
|
||||
session = requests_cache.CachedSession('yfinance.cache')
|
||||
session.headers['User-agent'] = 'my-program/1.0'
|
||||
ticker = yf.Ticker('msft', session=session)
|
||||
# The scraped response will be stored in the cache
|
||||
ticker.actions
|
||||
```
|
||||
|
||||
Combine a `requests_cache` with rate-limiting to avoid triggering Yahoo's rate-limiter/blocker that can corrupt data.
|
||||
```python
|
||||
from requests import Session
|
||||
from requests_cache import CacheMixin, SQLiteCache
|
||||
from requests_ratelimiter import LimiterMixin, MemoryQueueBucket
|
||||
class CachedLimiterSession(CacheMixin, LimiterMixin, Session):
|
||||
""" """
|
||||
|
||||
session = CachedLimiterSession(
|
||||
per_second=0.9,
|
||||
bucket_class=MemoryQueueBucket,
|
||||
backend=SQLiteCache("yfinance.cache"),
|
||||
)
|
||||
```
|
||||
|
||||
### Managing Multi-Level Columns
|
||||
@@ -254,6 +231,18 @@ yfinance?](https://stackoverflow.com/questions/63107801)
|
||||
- How to download single or multiple tickers into a single
|
||||
dataframe with single level column names and a ticker column
|
||||
|
||||
### Timezone cache store
|
||||
|
||||
When fetching price data, all dates are localized to stock exchange timezone.
|
||||
But timezone retrieval is relatively slow, so yfinance attemps to cache them
|
||||
in your users cache folder.
|
||||
You can direct cache to use a different location with `set_tz_cache_location()`:
|
||||
```python
|
||||
import yfinance as yf
|
||||
yf.set_tz_cache_location("custom/cache/location")
|
||||
...
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## `pandas_datareader` override
|
||||
@@ -289,12 +278,16 @@ To install `yfinance` using `conda`, see
|
||||
### Requirements
|
||||
|
||||
- [Python](https://www.python.org) \>= 2.7, 3.4+
|
||||
- [Pandas](https://github.com/pydata/pandas) (tested to work with
|
||||
\>=0.23.1)
|
||||
- [Numpy](http://www.numpy.org) \>= 1.11.1
|
||||
- [requests](http://docs.python-requests.org/en/master/) \>= 2.14.2
|
||||
- [lxml](https://pypi.org/project/lxml/) \>= 4.5.1
|
||||
- [appdirs](https://pypi.org/project/appdirs) \>=1.4.4
|
||||
- [Pandas](https://github.com/pydata/pandas) \>= 1.3.0
|
||||
- [Numpy](http://www.numpy.org) \>= 1.16.5
|
||||
- [requests](http://docs.python-requests.org/en/master) \>= 2.26
|
||||
- [lxml](https://pypi.org/project/lxml) \>= 4.9.1
|
||||
- [appdirs](https://pypi.org/project/appdirs) \>= 1.4.4
|
||||
- [pytz](https://pypi.org/project/pytz) \>=2022.5
|
||||
- [frozendict](https://pypi.org/project/frozendict) \>= 2.3.4
|
||||
- [beautifulsoup4](https://pypi.org/project/beautifulsoup4) \>= 4.11.1
|
||||
- [html5lib](https://pypi.org/project/html5lib) \>= 1.1
|
||||
- [cryptography](https://pypi.org/project/cryptography) \>= 3.3.2
|
||||
|
||||
### Optional (if you want to use `pandas_datareader`)
|
||||
|
||||
|
||||
30
meta.yaml
30
meta.yaml
@@ -1,5 +1,5 @@
|
||||
{% set name = "yfinance" %}
|
||||
{% set version = "0.1.58" %}
|
||||
{% set version = "0.2.11" %}
|
||||
|
||||
package:
|
||||
name: "{{ name|lower }}"
|
||||
@@ -16,22 +16,34 @@ build:
|
||||
|
||||
requirements:
|
||||
host:
|
||||
- pandas >=0.24.0
|
||||
- pandas >=1.3.0
|
||||
- numpy >=1.16.5
|
||||
- requests >=2.21
|
||||
- requests >=2.26
|
||||
- multitasking >=0.0.7
|
||||
- lxml >=4.5.1
|
||||
- appdirs >= 1.4.4
|
||||
- lxml >=4.9.1
|
||||
- appdirs >=1.4.4
|
||||
- pytz >=2022.5
|
||||
- frozendict >=2.3.4
|
||||
- beautifulsoup4 >=4.11.1
|
||||
- html5lib >=1.1
|
||||
# - pycryptodome >=3.6.6
|
||||
- cryptography >=3.3.2
|
||||
- pip
|
||||
- python
|
||||
|
||||
run:
|
||||
- pandas >=0.24.0
|
||||
- pandas >=1.3.0
|
||||
- numpy >=1.16.5
|
||||
- requests >=2.21
|
||||
- requests >=2.26
|
||||
- multitasking >=0.0.7
|
||||
- lxml >=4.5.1
|
||||
- appdirs >= 1.4.4
|
||||
- lxml >=4.9.1
|
||||
- appdirs >=1.4.4
|
||||
- pytz >=2022.5
|
||||
- frozendict >=2.3.4
|
||||
- beautifulsoup4 >=4.11.1
|
||||
- html5lib >=1.1
|
||||
# - pycryptodome >=3.6.6
|
||||
- cryptography >=3.3.2
|
||||
- python
|
||||
|
||||
test:
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
pandas>=0.24.0
|
||||
pandas>=1.3.0
|
||||
numpy>=1.16.5
|
||||
requests>=2.26
|
||||
multitasking>=0.0.7
|
||||
lxml>=4.5.1
|
||||
lxml>=4.9.1
|
||||
appdirs>=1.4.4
|
||||
pytz>=2022.5
|
||||
frozendict>=2.3.4
|
||||
beautifulsoup4>=4.11.1
|
||||
html5lib>=1.1
|
||||
html5lib>=1.1
|
||||
cryptography>=3.3.2
|
||||
|
||||
8
setup.py
8
setup.py
@@ -59,10 +59,12 @@ setup(
|
||||
platforms=['any'],
|
||||
keywords='pandas, yahoo finance, pandas datareader',
|
||||
packages=find_packages(exclude=['contrib', 'docs', 'tests', 'examples']),
|
||||
install_requires=['pandas>=1.1.0', 'numpy>=1.15',
|
||||
install_requires=['pandas>=1.3.0', 'numpy>=1.16.5',
|
||||
'requests>=2.26', 'multitasking>=0.0.7',
|
||||
'lxml>=4.5.1', 'appdirs>=1.4.4', 'pytz>=2022.5',
|
||||
'frozendict>=2.3.4',
|
||||
'lxml>=4.9.1', 'appdirs>=1.4.4', 'pytz>=2022.5',
|
||||
'frozendict>=2.3.4',
|
||||
# 'pycryptodome>=3.6.6',
|
||||
'cryptography>=3.3.2',
|
||||
'beautifulsoup4>=4.11.1', 'html5lib>=1.1'],
|
||||
entry_points={
|
||||
'console_scripts': [
|
||||
|
||||
328
tests/prices.py
328
tests/prices.py
@@ -6,28 +6,25 @@ import datetime as _dt
|
||||
import pytz as _tz
|
||||
import numpy as _np
|
||||
import pandas as _pd
|
||||
import os
|
||||
|
||||
# Create temp session
|
||||
import requests_cache, tempfile
|
||||
|
||||
td = tempfile.TemporaryDirectory()
|
||||
import requests_cache
|
||||
|
||||
|
||||
class TestPriceHistory(unittest.TestCase):
|
||||
def setUp(self):
|
||||
global td
|
||||
self.td = td
|
||||
self.session = requests_cache.CachedSession(os.path.join(self.td.name, "yfinance.cache"))
|
||||
session = None
|
||||
|
||||
def tearDown(self):
|
||||
self.session.close()
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
cls.session = requests_cache.CachedSession(backend='memory')
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
if cls.session is not None:
|
||||
cls.session.close()
|
||||
|
||||
def test_daily_index(self):
|
||||
tkrs = ["BHP.AX", "IMP.JO", "BP.L", "PNL.L", "INTC"]
|
||||
|
||||
intervals = ["1d", "1wk", "1mo"]
|
||||
|
||||
for tkr in tkrs:
|
||||
dat = yf.Ticker(tkr, session=self.session)
|
||||
|
||||
@@ -37,6 +34,25 @@ class TestPriceHistory(unittest.TestCase):
|
||||
f = df.index.time == _dt.time(0)
|
||||
self.assertTrue(f.all())
|
||||
|
||||
def test_duplicatingHourly(self):
|
||||
tkrs = ["IMP.JO", "BHG.JO", "SSW.JO", "BP.L", "INTC"]
|
||||
for tkr in tkrs:
|
||||
dat = yf.Ticker(tkr, session=self.session)
|
||||
tz = dat._get_ticker_tz(debug_mode=False, proxy=None, timeout=None)
|
||||
|
||||
dt_utc = _tz.timezone("UTC").localize(_dt.datetime.utcnow())
|
||||
dt = dt_utc.astimezone(_tz.timezone(tz))
|
||||
start_d = dt.date() - _dt.timedelta(days=7)
|
||||
df = dat.history(start=start_d, interval="1h")
|
||||
|
||||
dt0 = df.index[-2]
|
||||
dt1 = df.index[-1]
|
||||
try:
|
||||
self.assertNotEqual(dt0.hour, dt1.hour)
|
||||
except:
|
||||
print("Ticker = ", tkr)
|
||||
raise
|
||||
|
||||
def test_duplicatingDaily(self):
|
||||
tkrs = ["IMP.JO", "BHG.JO", "SSW.JO", "BP.L", "INTC"]
|
||||
test_run = False
|
||||
@@ -91,22 +107,27 @@ class TestPriceHistory(unittest.TestCase):
|
||||
def test_intraDayWithEvents(self):
|
||||
# TASE dividend release pre-market, doesn't merge nicely with intra-day data so check still present
|
||||
|
||||
tkr = "ICL.TA"
|
||||
# tkr = "ESLT.TA"
|
||||
# tkr = "ONE.TA"
|
||||
# tkr = "MGDL.TA"
|
||||
start_d = _dt.date.today() - _dt.timedelta(days=60)
|
||||
end_d = None
|
||||
df_daily = yf.Ticker(tkr, session=self.session).history(start=start_d, end=end_d, interval="1d", actions=True)
|
||||
df_daily_divs = df_daily["Dividends"][df_daily["Dividends"] != 0]
|
||||
if df_daily_divs.shape[0] == 0:
|
||||
self.skipTest("Skipping test_intraDayWithEvents() because 'ICL.TA' has no dividend in last 60 days")
|
||||
tase_tkrs = ["ICL.TA", "ESLT.TA", "ONE.TA", "MGDL.TA"]
|
||||
test_run = False
|
||||
for tkr in tase_tkrs:
|
||||
start_d = _dt.date.today() - _dt.timedelta(days=59)
|
||||
end_d = None
|
||||
df_daily = yf.Ticker(tkr, session=self.session).history(start=start_d, end=end_d, interval="1d", actions=True)
|
||||
df_daily_divs = df_daily["Dividends"][df_daily["Dividends"] != 0]
|
||||
if df_daily_divs.shape[0] == 0:
|
||||
# self.skipTest("Skipping test_intraDayWithEvents() because 'ICL.TA' has no dividend in last 60 days")
|
||||
continue
|
||||
|
||||
last_div_date = df_daily_divs.index[-1]
|
||||
start_d = last_div_date.date()
|
||||
end_d = last_div_date.date() + _dt.timedelta(days=1)
|
||||
df = yf.Ticker(tkr, session=self.session).history(start=start_d, end=end_d, interval="15m", actions=True)
|
||||
self.assertTrue((df["Dividends"] != 0.0).any())
|
||||
last_div_date = df_daily_divs.index[-1]
|
||||
start_d = last_div_date.date()
|
||||
end_d = last_div_date.date() + _dt.timedelta(days=1)
|
||||
df = yf.Ticker(tkr, session=self.session).history(start=start_d, end=end_d, interval="15m", actions=True)
|
||||
self.assertTrue((df["Dividends"] != 0.0).any())
|
||||
test_run = True
|
||||
break
|
||||
|
||||
if not test_run:
|
||||
self.skipTest("Skipping test_intraDayWithEvents() because no tickers had a dividend in last 60 days")
|
||||
|
||||
def test_dailyWithEvents(self):
|
||||
# Reproduce issue #521
|
||||
@@ -211,7 +232,6 @@ class TestPriceHistory(unittest.TestCase):
|
||||
|
||||
def test_tz_dst_ambiguous(self):
|
||||
# Reproduce issue #1100
|
||||
|
||||
try:
|
||||
yf.Ticker("ESLT.TA", session=self.session).history(start="2002-10-06", end="2002-10-09", interval="1d")
|
||||
except _tz.exceptions.AmbiguousTimeError:
|
||||
@@ -242,6 +262,116 @@ class TestPriceHistory(unittest.TestCase):
|
||||
print("Weekly data not aligned to Monday")
|
||||
raise
|
||||
|
||||
def test_prune_post_intraday_us(self):
|
||||
# Half-day before USA Thanksgiving. Yahoo normally
|
||||
# returns an interval starting when regular trading closes,
|
||||
# even if prepost=False.
|
||||
|
||||
# Setup
|
||||
tkr = "AMZN"
|
||||
interval = "1h"
|
||||
interval_td = _dt.timedelta(hours=1)
|
||||
time_open = _dt.time(9, 30)
|
||||
time_close = _dt.time(16)
|
||||
special_day = _dt.date(2022, 11, 25)
|
||||
time_early_close = _dt.time(13)
|
||||
dat = yf.Ticker(tkr, session=self.session)
|
||||
|
||||
# Run
|
||||
start_d = special_day - _dt.timedelta(days=7)
|
||||
end_d = special_day + _dt.timedelta(days=7)
|
||||
df = dat.history(start=start_d, end=end_d, interval=interval, prepost=False, keepna=True)
|
||||
tg_last_dt = df.loc[str(special_day)].index[-1]
|
||||
self.assertTrue(tg_last_dt.time() < time_early_close)
|
||||
|
||||
# Test no other afternoons (or mornings) were pruned
|
||||
start_d = _dt.date(special_day.year, 1, 1)
|
||||
end_d = _dt.date(special_day.year+1, 1, 1)
|
||||
df = dat.history(start=start_d, end=end_d, interval="1h", prepost=False, keepna=True)
|
||||
last_dts = _pd.Series(df.index).groupby(df.index.date).last()
|
||||
f_early_close = (last_dts+interval_td).dt.time < time_close
|
||||
early_close_dates = last_dts.index[f_early_close].values
|
||||
self.assertEqual(len(early_close_dates), 1)
|
||||
self.assertEqual(early_close_dates[0], special_day)
|
||||
|
||||
first_dts = _pd.Series(df.index).groupby(df.index.date).first()
|
||||
f_late_open = first_dts.dt.time > time_open
|
||||
late_open_dates = first_dts.index[f_late_open]
|
||||
self.assertEqual(len(late_open_dates), 0)
|
||||
|
||||
def test_prune_post_intraday_omx(self):
|
||||
# Half-day before Sweden Christmas. Yahoo normally
|
||||
# returns an interval starting when regular trading closes,
|
||||
# even if prepost=False.
|
||||
# If prepost=False, test that yfinance is removing prepost intervals.
|
||||
|
||||
# Setup
|
||||
tkr = "AEC.ST"
|
||||
interval = "1h"
|
||||
interval_td = _dt.timedelta(hours=1)
|
||||
time_open = _dt.time(9)
|
||||
time_close = _dt.time(17,30)
|
||||
special_day = _dt.date(2022, 12, 23)
|
||||
time_early_close = _dt.time(13, 2)
|
||||
dat = yf.Ticker(tkr, session=self.session)
|
||||
|
||||
# Half trading day Jan 5, Apr 14, May 25, Jun 23, Nov 4, Dec 23, Dec 30
|
||||
half_days = [_dt.date(special_day.year, x[0], x[1]) for x in [(1,5), (4,14), (5,25), (6,23), (11,4), (12,23), (12,30)]]
|
||||
|
||||
# Yahoo has incorrectly classified afternoon of 2022-04-13 as post-market.
|
||||
# Nothing yfinance can do because Yahoo doesn't return data with prepost=False.
|
||||
# But need to handle in this test.
|
||||
expected_incorrect_half_days = [_dt.date(2022,4,13)]
|
||||
half_days = sorted(half_days+expected_incorrect_half_days)
|
||||
|
||||
# Run
|
||||
start_d = special_day - _dt.timedelta(days=7)
|
||||
end_d = special_day + _dt.timedelta(days=7)
|
||||
df = dat.history(start=start_d, end=end_d, interval=interval, prepost=False, keepna=True)
|
||||
tg_last_dt = df.loc[str(special_day)].index[-1]
|
||||
self.assertTrue(tg_last_dt.time() < time_early_close)
|
||||
|
||||
# Test no other afternoons (or mornings) were pruned
|
||||
start_d = _dt.date(special_day.year, 1, 1)
|
||||
end_d = _dt.date(special_day.year+1, 1, 1)
|
||||
df = dat.history(start=start_d, end=end_d, interval="1h", prepost=False, keepna=True)
|
||||
last_dts = _pd.Series(df.index).groupby(df.index.date).last()
|
||||
f_early_close = (last_dts+interval_td).dt.time < time_close
|
||||
early_close_dates = last_dts.index[f_early_close].values
|
||||
unexpected_early_close_dates = [d for d in early_close_dates if not d in half_days]
|
||||
self.assertEqual(len(unexpected_early_close_dates), 0)
|
||||
self.assertEqual(len(early_close_dates), len(half_days))
|
||||
self.assertTrue(_np.equal(early_close_dates, half_days).all())
|
||||
|
||||
first_dts = _pd.Series(df.index).groupby(df.index.date).first()
|
||||
f_late_open = first_dts.dt.time > time_open
|
||||
late_open_dates = first_dts.index[f_late_open]
|
||||
self.assertEqual(len(late_open_dates), 0)
|
||||
|
||||
def test_prune_post_intraday_asx(self):
|
||||
# Setup
|
||||
tkr = "BHP.AX"
|
||||
interval = "1h"
|
||||
interval_td = _dt.timedelta(hours=1)
|
||||
time_open = _dt.time(10)
|
||||
time_close = _dt.time(16,12)
|
||||
# No early closes in 2022
|
||||
dat = yf.Ticker(tkr, session=self.session)
|
||||
|
||||
# Test no afternoons (or mornings) were pruned
|
||||
start_d = _dt.date(2022, 1, 1)
|
||||
end_d = _dt.date(2022+1, 1, 1)
|
||||
df = dat.history(start=start_d, end=end_d, interval="1h", prepost=False, keepna=True)
|
||||
last_dts = _pd.Series(df.index).groupby(df.index.date).last()
|
||||
f_early_close = (last_dts+interval_td).dt.time < time_close
|
||||
early_close_dates = last_dts.index[f_early_close].values
|
||||
self.assertEqual(len(early_close_dates), 0)
|
||||
|
||||
first_dts = _pd.Series(df.index).groupby(df.index.date).first()
|
||||
f_late_open = first_dts.dt.time > time_open
|
||||
late_open_dates = first_dts.index[f_late_open]
|
||||
self.assertEqual(len(late_open_dates), 0)
|
||||
|
||||
def test_weekly_2rows_fix(self):
|
||||
tkr = "AMZN"
|
||||
start = _dt.date.today() - _dt.timedelta(days=14)
|
||||
@@ -251,15 +381,43 @@ class TestPriceHistory(unittest.TestCase):
|
||||
df = dat.history(start=start, interval="1wk")
|
||||
self.assertTrue((df.index.weekday == 0).all())
|
||||
|
||||
def test_repair_weekly_100x(self):
|
||||
# Sometimes, Yahoo returns prices 100x the correct value.
|
||||
# Suspect mixup between £/pence or $/cents etc.
|
||||
# E.g. ticker PNL.L
|
||||
class TestPriceRepair(unittest.TestCase):
|
||||
session = None
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
cls.session = requests_cache.CachedSession(backend='memory')
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
if cls.session is not None:
|
||||
cls.session.close()
|
||||
|
||||
def test_reconstruct_2m(self):
|
||||
# 2m repair requires 1m data.
|
||||
# Yahoo restricts 1m fetches to 7 days max within last 30 days.
|
||||
# Need to test that '_reconstruct_intervals_batch()' can handle this.
|
||||
|
||||
tkrs = ["BHP.AX", "IMP.JO", "BP.L", "PNL.L", "INTC"]
|
||||
|
||||
dt_now = _pd.Timestamp.utcnow()
|
||||
td_7d = _dt.timedelta(days=7)
|
||||
td_60d = _dt.timedelta(days=60)
|
||||
|
||||
# Round time for 'requests_cache' reuse
|
||||
dt_now = dt_now.ceil("1h")
|
||||
|
||||
for tkr in tkrs:
|
||||
dat = yf.Ticker(tkr, session=self.session)
|
||||
end_dt = dt_now
|
||||
start_dt = end_dt - td_60d
|
||||
df = dat.history(start=start_dt, end=end_dt, interval="2m", repair=True)
|
||||
|
||||
def test_repair_100x_weekly(self):
|
||||
# Setup:
|
||||
tkr = "PNL.L"
|
||||
dat = yf.Ticker(tkr, session=self.session)
|
||||
tz_exchange = dat.info["exchangeTimezoneName"]
|
||||
tz_exchange = dat.fast_info["timezone"]
|
||||
|
||||
data_cols = ["Low", "High", "Open", "Close", "Adj Close"]
|
||||
df = _pd.DataFrame(data={"Open": [470.5, 473.5, 474.5, 470],
|
||||
@@ -268,25 +426,32 @@ class TestPriceHistory(unittest.TestCase):
|
||||
"Close": [475, 473.5, 472, 473.5],
|
||||
"Adj Close": [475, 473.5, 472, 473.5],
|
||||
"Volume": [2295613, 2245604, 3000287, 2635611]},
|
||||
index=_pd.to_datetime([_dt.date(2022, 10, 23),
|
||||
_dt.date(2022, 10, 16),
|
||||
_dt.date(2022, 10, 9),
|
||||
_dt.date(2022, 10, 2)]))
|
||||
index=_pd.to_datetime([_dt.date(2022, 10, 24),
|
||||
_dt.date(2022, 10, 17),
|
||||
_dt.date(2022, 10, 10),
|
||||
_dt.date(2022, 10, 3)]))
|
||||
df = df.sort_index()
|
||||
df.index.name = "Date"
|
||||
df_bad = df.copy()
|
||||
df_bad.loc["2022-10-23", "Close"] *= 100
|
||||
df_bad.loc["2022-10-16", "Low"] *= 100
|
||||
df_bad.loc["2022-10-2", "Open"] *= 100
|
||||
df_bad.loc["2022-10-24", "Close"] *= 100
|
||||
df_bad.loc["2022-10-17", "Low"] *= 100
|
||||
df_bad.loc["2022-10-03", "Open"] *= 100
|
||||
df.index = df.index.tz_localize(tz_exchange)
|
||||
df_bad.index = df_bad.index.tz_localize(tz_exchange)
|
||||
|
||||
# Run test
|
||||
|
||||
df_repaired = dat._fix_unit_mixups(df_bad, "1wk", tz_exchange)
|
||||
df_repaired = dat._fix_unit_mixups(df_bad, "1wk", tz_exchange, prepost=False)
|
||||
|
||||
# First test - no errors left
|
||||
for c in data_cols:
|
||||
self.assertTrue(_np.isclose(df_repaired[c], df[c], rtol=1e-2).all())
|
||||
try:
|
||||
self.assertTrue(_np.isclose(df_repaired[c], df[c], rtol=1e-2).all())
|
||||
except:
|
||||
print(df[c])
|
||||
print(df_repaired[c])
|
||||
raise
|
||||
|
||||
|
||||
# Second test - all differences should be either ~1x or ~100x
|
||||
ratio = df_bad[data_cols].values / df[data_cols].values
|
||||
@@ -299,16 +464,12 @@ class TestPriceHistory(unittest.TestCase):
|
||||
f_1 = ratio == 1
|
||||
self.assertTrue((f_100 | f_1).all())
|
||||
|
||||
def test_repair_weekly_preSplit_100x(self):
|
||||
# Sometimes, Yahoo returns prices 100x the correct value.
|
||||
# Suspect mixup between £/pence or $/cents etc.
|
||||
# E.g. ticker PNL.L
|
||||
|
||||
def test_repair_100x_weekly_preSplit(self):
|
||||
# PNL.L has a stock-split in 2022. Sometimes requesting data before 2022 is not split-adjusted.
|
||||
|
||||
tkr = "PNL.L"
|
||||
dat = yf.Ticker(tkr, session=self.session)
|
||||
tz_exchange = dat.info["exchangeTimezoneName"]
|
||||
tz_exchange = dat.fast_info["timezone"]
|
||||
|
||||
data_cols = ["Low", "High", "Open", "Close", "Adj Close"]
|
||||
df = _pd.DataFrame(data={"Open": [400, 398, 392.5, 417],
|
||||
@@ -321,6 +482,7 @@ class TestPriceHistory(unittest.TestCase):
|
||||
_dt.date(2020, 3, 23),
|
||||
_dt.date(2020, 3, 16),
|
||||
_dt.date(2020, 3, 9)]))
|
||||
df = df.sort_index()
|
||||
# Simulate data missing split-adjustment:
|
||||
df[data_cols] *= 100.0
|
||||
df["Volume"] *= 0.01
|
||||
@@ -334,7 +496,7 @@ class TestPriceHistory(unittest.TestCase):
|
||||
df.index = df.index.tz_localize(tz_exchange)
|
||||
df_bad.index = df_bad.index.tz_localize(tz_exchange)
|
||||
|
||||
df_repaired = dat._fix_unit_mixups(df_bad, "1wk", tz_exchange)
|
||||
df_repaired = dat._fix_unit_mixups(df_bad, "1wk", tz_exchange, prepost=False)
|
||||
|
||||
# First test - no errors left
|
||||
for c in data_cols:
|
||||
@@ -359,14 +521,10 @@ class TestPriceHistory(unittest.TestCase):
|
||||
f_1 = ratio == 1
|
||||
self.assertTrue((f_100 | f_1).all())
|
||||
|
||||
def test_repair_daily_100x(self):
|
||||
# Sometimes, Yahoo returns prices 100x the correct value.
|
||||
# Suspect mixup between £/pence or $/cents etc.
|
||||
# E.g. ticker PNL.L
|
||||
|
||||
def test_repair_100x_daily(self):
|
||||
tkr = "PNL.L"
|
||||
dat = yf.Ticker(tkr, session=self.session)
|
||||
tz_exchange = dat.info["exchangeTimezoneName"]
|
||||
tz_exchange = dat.fast_info["timezone"]
|
||||
|
||||
data_cols = ["Low", "High", "Open", "Close", "Adj Close"]
|
||||
df = _pd.DataFrame(data={"Open": [478, 476, 476, 472],
|
||||
@@ -379,6 +537,7 @@ class TestPriceHistory(unittest.TestCase):
|
||||
_dt.date(2022, 10, 31),
|
||||
_dt.date(2022, 10, 28),
|
||||
_dt.date(2022, 10, 27)]))
|
||||
df = df.sort_index()
|
||||
df.index.name = "Date"
|
||||
df_bad = df.copy()
|
||||
df_bad.loc["2022-11-01", "Close"] *= 100
|
||||
@@ -387,7 +546,7 @@ class TestPriceHistory(unittest.TestCase):
|
||||
df.index = df.index.tz_localize(tz_exchange)
|
||||
df_bad.index = df_bad.index.tz_localize(tz_exchange)
|
||||
|
||||
df_repaired = dat._fix_unit_mixups(df_bad, "1d", tz_exchange)
|
||||
df_repaired = dat._fix_unit_mixups(df_bad, "1d", tz_exchange, prepost=False)
|
||||
|
||||
# First test - no errors left
|
||||
for c in data_cols:
|
||||
@@ -404,13 +563,10 @@ class TestPriceHistory(unittest.TestCase):
|
||||
f_1 = ratio == 1
|
||||
self.assertTrue((f_100 | f_1).all())
|
||||
|
||||
def test_repair_daily_zeroes(self):
|
||||
# Sometimes Yahoo returns price=0.0 when price obviously not zero
|
||||
# E.g. ticker BBIL.L
|
||||
|
||||
def test_repair_zeroes_daily(self):
|
||||
tkr = "BBIL.L"
|
||||
dat = yf.Ticker(tkr, session=self.session)
|
||||
tz_exchange = dat.info["exchangeTimezoneName"]
|
||||
tz_exchange = dat.fast_info["timezone"]
|
||||
|
||||
df_bad = _pd.DataFrame(data={"Open": [0, 102.04, 102.04],
|
||||
"High": [0, 102.1, 102.11],
|
||||
@@ -421,23 +577,52 @@ class TestPriceHistory(unittest.TestCase):
|
||||
index=_pd.to_datetime([_dt.datetime(2022, 11, 1),
|
||||
_dt.datetime(2022, 10, 31),
|
||||
_dt.datetime(2022, 10, 30)]))
|
||||
df_bad = df_bad.sort_index()
|
||||
df_bad.index.name = "Date"
|
||||
df_bad.index = df_bad.index.tz_localize(tz_exchange)
|
||||
|
||||
repaired_df = dat._fix_zero_prices(df_bad, "1d", tz_exchange)
|
||||
repaired_df = dat._fix_zeroes(df_bad, "1d", tz_exchange, prepost=False)
|
||||
|
||||
correct_df = df_bad.copy()
|
||||
correct_df.loc[correct_df.index[0], "Open"] = 102.080002
|
||||
correct_df.loc[correct_df.index[0], "Low"] = 102.032501
|
||||
correct_df.loc[correct_df.index[0], "High"] = 102.080002
|
||||
correct_df.loc["2022-11-01", "Open"] = 102.080002
|
||||
correct_df.loc["2022-11-01", "Low"] = 102.032501
|
||||
correct_df.loc["2022-11-01", "High"] = 102.080002
|
||||
for c in ["Open", "Low", "High", "Close"]:
|
||||
self.assertTrue(_np.isclose(repaired_df[c], correct_df[c], rtol=1e-8).all())
|
||||
|
||||
try:
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
finally:
|
||||
td.cleanup()
|
||||
def test_repair_zeroes_hourly(self):
|
||||
tkr = "INTC"
|
||||
dat = yf.Ticker(tkr, session=self.session)
|
||||
tz_exchange = dat.fast_info["timezone"]
|
||||
|
||||
correct_df = dat.history(period="1wk", interval="1h", auto_adjust=False, repair=True)
|
||||
|
||||
df_bad = correct_df.copy()
|
||||
bad_idx = correct_df.index[10]
|
||||
df_bad.loc[bad_idx, "Open"] = _np.nan
|
||||
df_bad.loc[bad_idx, "High"] = _np.nan
|
||||
df_bad.loc[bad_idx, "Low"] = _np.nan
|
||||
df_bad.loc[bad_idx, "Close"] = _np.nan
|
||||
df_bad.loc[bad_idx, "Adj Close"] = _np.nan
|
||||
df_bad.loc[bad_idx, "Volume"] = 0
|
||||
|
||||
repaired_df = dat._fix_zeroes(df_bad, "1h", tz_exchange, prepost=False)
|
||||
|
||||
for c in ["Open", "Low", "High", "Close"]:
|
||||
try:
|
||||
self.assertTrue(_np.isclose(repaired_df[c], correct_df[c], rtol=1e-7).all())
|
||||
except:
|
||||
print("COLUMN", c)
|
||||
print("- repaired_df")
|
||||
print(repaired_df)
|
||||
print("- correct_df[c]:")
|
||||
print(correct_df[c])
|
||||
print("- diff:")
|
||||
print(repaired_df[c] - correct_df[c])
|
||||
raise
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
||||
# # Run tests sequentially:
|
||||
# import inspect
|
||||
@@ -446,4 +631,3 @@ finally:
|
||||
# test_src.index(f"def {x}") - test_src.index(f"def {y}")
|
||||
# )
|
||||
# unittest.main(verbosity=2)
|
||||
|
||||
|
||||
577
tests/ticker.py
577
tests/ticker.py
@@ -9,6 +9,7 @@ Specific test class:
|
||||
|
||||
"""
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
|
||||
from .context import yfinance as yf
|
||||
|
||||
@@ -16,7 +17,7 @@ import unittest
|
||||
import requests_cache
|
||||
|
||||
# Set this to see the exact requests that are made during tests
|
||||
DEBUG_LOG_REQUESTS = True
|
||||
DEBUG_LOG_REQUESTS = False
|
||||
|
||||
if DEBUG_LOG_REQUESTS:
|
||||
import logging
|
||||
@@ -29,7 +30,7 @@ class TestTicker(unittest.TestCase):
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
cls.session = requests_cache.CachedSession()
|
||||
cls.session = requests_cache.CachedSession(backend='memory')
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
@@ -51,12 +52,16 @@ class TestTicker(unittest.TestCase):
|
||||
def test_badTicker(self):
|
||||
# Check yfinance doesn't die when ticker delisted
|
||||
|
||||
tkr = "AM2Z.TA"
|
||||
tkr = "DJI" # typo of "^DJI"
|
||||
dat = yf.Ticker(tkr, session=self.session)
|
||||
dat.history(period="1wk")
|
||||
dat.history(start="2022-01-01")
|
||||
dat.history(start="2022-01-01", end="2022-03-01")
|
||||
yf.download([tkr], period="1wk")
|
||||
|
||||
for k in dat.fast_info:
|
||||
dat.fast_info[k]
|
||||
|
||||
dat.isin
|
||||
dat.major_holders
|
||||
dat.institutional_holders
|
||||
@@ -65,6 +70,7 @@ class TestTicker(unittest.TestCase):
|
||||
dat.splits
|
||||
dat.actions
|
||||
dat.shares
|
||||
dat.get_shares_full()
|
||||
dat.info
|
||||
dat.calendar
|
||||
dat.recommendations
|
||||
@@ -86,22 +92,129 @@ class TestTicker(unittest.TestCase):
|
||||
dat.earnings_dates
|
||||
dat.earnings_forecasts
|
||||
|
||||
def test_goodTicker(self):
|
||||
# that yfinance works when full api is called on same instance of ticker
|
||||
|
||||
class TestTickerEarnings(unittest.TestCase):
|
||||
tkrs = ["IBM"]
|
||||
tkrs.append("QCSTIX") # weird ticker, no price history but has previous close
|
||||
for tkr in tkrs:
|
||||
dat = yf.Ticker(tkr, session=self.session)
|
||||
|
||||
dat.history(period="1wk")
|
||||
dat.history(start="2022-01-01")
|
||||
dat.history(start="2022-01-01", end="2022-03-01")
|
||||
yf.download([tkr], period="1wk")
|
||||
|
||||
for k in dat.fast_info:
|
||||
dat.fast_info[k]
|
||||
|
||||
dat.isin
|
||||
dat.major_holders
|
||||
dat.institutional_holders
|
||||
dat.mutualfund_holders
|
||||
dat.dividends
|
||||
dat.splits
|
||||
dat.actions
|
||||
dat.shares
|
||||
dat.get_shares_full()
|
||||
dat.info
|
||||
dat.calendar
|
||||
dat.recommendations
|
||||
dat.earnings
|
||||
dat.quarterly_earnings
|
||||
dat.income_stmt
|
||||
dat.quarterly_income_stmt
|
||||
dat.balance_sheet
|
||||
dat.quarterly_balance_sheet
|
||||
dat.cashflow
|
||||
dat.quarterly_cashflow
|
||||
dat.recommendations_summary
|
||||
dat.analyst_price_target
|
||||
dat.revenue_forecasts
|
||||
dat.sustainability
|
||||
dat.options
|
||||
dat.news
|
||||
dat.earnings_trend
|
||||
dat.earnings_dates
|
||||
dat.earnings_forecasts
|
||||
|
||||
|
||||
class TestTickerHistory(unittest.TestCase):
|
||||
session = None
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
cls.session = requests_cache.CachedSession(backend='memory')
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
if cls.session is not None:
|
||||
cls.session.close()
|
||||
|
||||
def setUp(self):
|
||||
self.ticker = yf.Ticker("GOOGL")
|
||||
# use a ticker that has dividends
|
||||
self.ticker = yf.Ticker("IBM", session=self.session)
|
||||
|
||||
def tearDown(self):
|
||||
self.ticker = None
|
||||
|
||||
def test_earnings_history(self):
|
||||
data = self.ticker.earnings_history
|
||||
def test_history(self):
|
||||
with self.assertRaises(RuntimeError):
|
||||
self.ticker.history_metadata
|
||||
data = self.ticker.history("1y")
|
||||
self.assertIn("IBM", self.ticker.history_metadata.values(), "metadata missing")
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
|
||||
data_cached = self.ticker.earnings_history
|
||||
self.assertIs(data, data_cached, "data not cached")
|
||||
def test_no_expensive_calls_introduced(self):
|
||||
"""
|
||||
Make sure calling history to get price data has not introduced more calls to yahoo than absolutely necessary.
|
||||
As doing other type of scraping calls than "query2.finance.yahoo.com/v8/finance/chart" to yahoo website
|
||||
will quickly trigger spam-block when doing bulk download of history data.
|
||||
"""
|
||||
session = requests_cache.CachedSession(backend='memory')
|
||||
ticker = yf.Ticker("GOOGL", session=session)
|
||||
ticker.history("1y")
|
||||
actual_urls_called = tuple([r.url for r in session.cache.filter()])
|
||||
session.close()
|
||||
expected_urls = (
|
||||
'https://query2.finance.yahoo.com/v8/finance/chart/GOOGL?range=1y&interval=1d&includePrePost=False&events=div%2Csplits%2CcapitalGains',
|
||||
)
|
||||
self.assertEqual(expected_urls, actual_urls_called, "Different than expected url used to fetch history.")
|
||||
|
||||
def test_dividends(self):
|
||||
data = self.ticker.dividends
|
||||
self.assertIsInstance(data, pd.Series, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
|
||||
def test_splits(self):
|
||||
data = self.ticker.splits
|
||||
self.assertIsInstance(data, pd.Series, "data has wrong type")
|
||||
# self.assertFalse(data.empty, "data is empty")
|
||||
|
||||
def test_actions(self):
|
||||
data = self.ticker.actions
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
|
||||
|
||||
class TestTickerEarnings(unittest.TestCase):
|
||||
session = None
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
cls.session = requests_cache.CachedSession(backend='memory')
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
if cls.session is not None:
|
||||
cls.session.close()
|
||||
|
||||
def setUp(self):
|
||||
self.ticker = yf.Ticker("GOOGL", session=self.session)
|
||||
|
||||
def tearDown(self):
|
||||
self.ticker = None
|
||||
|
||||
def test_earnings(self):
|
||||
data = self.ticker.earnings
|
||||
@@ -143,11 +256,33 @@ class TestTickerEarnings(unittest.TestCase):
|
||||
data_cached = self.ticker.earnings_trend
|
||||
self.assertIs(data, data_cached, "data not cached")
|
||||
|
||||
def test_earnings_dates_with_limit(self):
|
||||
# use ticker with lots of historic earnings
|
||||
ticker = yf.Ticker("IBM")
|
||||
limit = 110
|
||||
data = ticker.get_earnings_dates(limit=limit)
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
self.assertEqual(len(data), limit, "Wrong number or rows")
|
||||
|
||||
data_cached = ticker.get_earnings_dates(limit=limit)
|
||||
self.assertIs(data, data_cached, "data not cached")
|
||||
|
||||
|
||||
class TestTickerHolders(unittest.TestCase):
|
||||
session = None
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
cls.session = requests_cache.CachedSession(backend='memory')
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
if cls.session is not None:
|
||||
cls.session.close()
|
||||
|
||||
def setUp(self):
|
||||
self.ticker = yf.Ticker("GOOGL")
|
||||
self.ticker = yf.Ticker("GOOGL", session=self.session)
|
||||
|
||||
def tearDown(self):
|
||||
self.ticker = None
|
||||
@@ -178,52 +313,291 @@ class TestTickerHolders(unittest.TestCase):
|
||||
|
||||
|
||||
class TestTickerMiscFinancials(unittest.TestCase):
|
||||
session = None
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
cls.session = requests_cache.CachedSession(backend='memory')
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
if cls.session is not None:
|
||||
cls.session.close()
|
||||
|
||||
def setUp(self):
|
||||
self.ticker = yf.Ticker("GOOGL")
|
||||
self.ticker = yf.Ticker("GOOGL", session=self.session)
|
||||
|
||||
# For ticker 'BSE.AX' (and others), Yahoo not returning
|
||||
# full quarterly financials (usually cash-flow) with all entries,
|
||||
# instead returns a smaller version in different data store.
|
||||
self.ticker_old_fmt = yf.Ticker("BSE.AX", session=self.session)
|
||||
|
||||
def tearDown(self):
|
||||
self.ticker = None
|
||||
|
||||
def test_balance_sheet(self):
|
||||
expected_row = "TotalAssets"
|
||||
data = self.ticker.balance_sheet
|
||||
def test_income_statement(self):
|
||||
expected_keys = ["Total Revenue", "Basic EPS"]
|
||||
expected_periods_days = 365
|
||||
|
||||
# Test contents of table
|
||||
data = self.ticker.get_income_stmt(pretty=True)
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
for k in expected_keys:
|
||||
self.assertIn(k, data.index, "Did not find expected row in index")
|
||||
period = abs((data.columns[0]-data.columns[1]).days)
|
||||
self.assertLess(abs(period-expected_periods_days), 20, "Not returning annual financials")
|
||||
|
||||
# Test property defaults
|
||||
data2 = self.ticker.income_stmt
|
||||
self.assertTrue(data.equals(data2), "property not defaulting to 'pretty=True'")
|
||||
|
||||
# Test pretty=False
|
||||
expected_keys = [k.replace(' ', '') for k in expected_keys]
|
||||
data = self.ticker.get_income_stmt(pretty=False)
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
for k in expected_keys:
|
||||
self.assertIn(k, data.index, "Did not find expected row in index")
|
||||
|
||||
# Test to_dict
|
||||
data = self.ticker.get_income_stmt(as_dict=True)
|
||||
self.assertIsInstance(data, dict, "data has wrong type")
|
||||
|
||||
|
||||
def test_quarterly_income_statement(self):
|
||||
expected_keys = ["Total Revenue", "Basic EPS"]
|
||||
expected_periods_days = 365//4
|
||||
|
||||
# Test contents of table
|
||||
data = self.ticker.get_income_stmt(pretty=True, freq="quarterly")
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
for k in expected_keys:
|
||||
self.assertIn(k, data.index, "Did not find expected row in index")
|
||||
period = abs((data.columns[0]-data.columns[1]).days)
|
||||
self.assertLess(abs(period-expected_periods_days), 20, "Not returning quarterly financials")
|
||||
|
||||
# Test property defaults
|
||||
data2 = self.ticker.quarterly_income_stmt
|
||||
self.assertTrue(data.equals(data2), "property not defaulting to 'pretty=True'")
|
||||
|
||||
# Test pretty=False
|
||||
expected_keys = [k.replace(' ', '') for k in expected_keys]
|
||||
data = self.ticker.get_income_stmt(pretty=False, freq="quarterly")
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
for k in expected_keys:
|
||||
self.assertIn(k, data.index, "Did not find expected row in index")
|
||||
|
||||
# Test to_dict
|
||||
data = self.ticker.get_income_stmt(as_dict=True)
|
||||
self.assertIsInstance(data, dict, "data has wrong type")
|
||||
|
||||
def test_quarterly_income_statement_old_fmt(self):
|
||||
expected_row = "TotalRevenue"
|
||||
data = self.ticker_old_fmt.get_income_stmt(freq="quarterly", legacy=True)
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
self.assertIn(expected_row, data.index, "Did not find expected row in index")
|
||||
|
||||
data_cached = self.ticker.balance_sheet
|
||||
data_cached = self.ticker_old_fmt.get_income_stmt(freq="quarterly", legacy=True)
|
||||
self.assertIs(data, data_cached, "data not cached")
|
||||
|
||||
def test_balance_sheet(self):
|
||||
expected_keys = ["Total Assets", "Net PPE"]
|
||||
expected_periods_days = 365
|
||||
|
||||
# Test contents of table
|
||||
data = self.ticker.get_balance_sheet(pretty=True)
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
for k in expected_keys:
|
||||
self.assertIn(k, data.index, "Did not find expected row in index")
|
||||
period = abs((data.columns[0]-data.columns[1]).days)
|
||||
self.assertLess(abs(period-expected_periods_days), 20, "Not returning annual financials")
|
||||
|
||||
# Test property defaults
|
||||
data2 = self.ticker.balance_sheet
|
||||
self.assertTrue(data.equals(data2), "property not defaulting to 'pretty=True'")
|
||||
|
||||
# Test pretty=False
|
||||
expected_keys = [k.replace(' ', '') for k in expected_keys]
|
||||
data = self.ticker.get_balance_sheet(pretty=False)
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
for k in expected_keys:
|
||||
self.assertIn(k, data.index, "Did not find expected row in index")
|
||||
|
||||
# Test to_dict
|
||||
data = self.ticker.get_balance_sheet(as_dict=True)
|
||||
self.assertIsInstance(data, dict, "data has wrong type")
|
||||
|
||||
def test_quarterly_balance_sheet(self):
|
||||
expected_keys = ["Total Assets", "Net PPE"]
|
||||
expected_periods_days = 365//4
|
||||
|
||||
# Test contents of table
|
||||
data = self.ticker.get_balance_sheet(pretty=True, freq="quarterly")
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
for k in expected_keys:
|
||||
self.assertIn(k, data.index, "Did not find expected row in index")
|
||||
period = abs((data.columns[0]-data.columns[1]).days)
|
||||
self.assertLess(abs(period-expected_periods_days), 20, "Not returning quarterly financials")
|
||||
|
||||
# Test property defaults
|
||||
data2 = self.ticker.quarterly_balance_sheet
|
||||
self.assertTrue(data.equals(data2), "property not defaulting to 'pretty=True'")
|
||||
|
||||
# Test pretty=False
|
||||
expected_keys = [k.replace(' ', '') for k in expected_keys]
|
||||
data = self.ticker.get_balance_sheet(pretty=False, freq="quarterly")
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
for k in expected_keys:
|
||||
self.assertIn(k, data.index, "Did not find expected row in index")
|
||||
|
||||
# Test to_dict
|
||||
data = self.ticker.get_balance_sheet(as_dict=True, freq="quarterly")
|
||||
self.assertIsInstance(data, dict, "data has wrong type")
|
||||
|
||||
def test_quarterly_balance_sheet_old_fmt(self):
|
||||
expected_row = "TotalAssets"
|
||||
data = self.ticker.quarterly_balance_sheet
|
||||
data = self.ticker_old_fmt.get_balance_sheet(freq="quarterly", legacy=True)
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
self.assertIn(expected_row, data.index, "Did not find expected row in index")
|
||||
|
||||
data_cached = self.ticker.quarterly_balance_sheet
|
||||
data_cached = self.ticker_old_fmt.get_balance_sheet(freq="quarterly", legacy=True)
|
||||
self.assertIs(data, data_cached, "data not cached")
|
||||
|
||||
def test_cashflow(self):
|
||||
expected_row = "OperatingCashFlow"
|
||||
data = self.ticker.cashflow
|
||||
def test_cash_flow(self):
|
||||
expected_keys = ["Operating Cash Flow", "Net PPE Purchase And Sale"]
|
||||
expected_periods_days = 365
|
||||
|
||||
# Test contents of table
|
||||
data = self.ticker.get_cashflow(pretty=True)
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
for k in expected_keys:
|
||||
self.assertIn(k, data.index, "Did not find expected row in index")
|
||||
period = abs((data.columns[0]-data.columns[1]).days)
|
||||
self.assertLess(abs(period-expected_periods_days), 20, "Not returning annual financials")
|
||||
|
||||
# Test property defaults
|
||||
data2 = self.ticker.cashflow
|
||||
self.assertTrue(data.equals(data2), "property not defaulting to 'pretty=True'")
|
||||
|
||||
# Test pretty=False
|
||||
expected_keys = [k.replace(' ', '') for k in expected_keys]
|
||||
data = self.ticker.get_cashflow(pretty=False)
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
for k in expected_keys:
|
||||
self.assertIn(k, data.index, "Did not find expected row in index")
|
||||
|
||||
# Test to_dict
|
||||
data = self.ticker.get_cashflow(as_dict=True)
|
||||
self.assertIsInstance(data, dict, "data has wrong type")
|
||||
|
||||
def test_quarterly_cash_flow(self):
|
||||
expected_keys = ["Operating Cash Flow", "Net PPE Purchase And Sale"]
|
||||
expected_periods_days = 365//4
|
||||
|
||||
# Test contents of table
|
||||
data = self.ticker.get_cashflow(pretty=True, freq="quarterly")
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
for k in expected_keys:
|
||||
self.assertIn(k, data.index, "Did not find expected row in index")
|
||||
period = abs((data.columns[0]-data.columns[1]).days)
|
||||
self.assertLess(abs(period-expected_periods_days), 20, "Not returning quarterly financials")
|
||||
|
||||
# Test property defaults
|
||||
data2 = self.ticker.quarterly_cashflow
|
||||
self.assertTrue(data.equals(data2), "property not defaulting to 'pretty=True'")
|
||||
|
||||
# Test pretty=False
|
||||
expected_keys = [k.replace(' ', '') for k in expected_keys]
|
||||
data = self.ticker.get_cashflow(pretty=False, freq="quarterly")
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
for k in expected_keys:
|
||||
self.assertIn(k, data.index, "Did not find expected row in index")
|
||||
|
||||
# Test to_dict
|
||||
data = self.ticker.get_cashflow(as_dict=True)
|
||||
self.assertIsInstance(data, dict, "data has wrong type")
|
||||
|
||||
def test_quarterly_cashflow_old_fmt(self):
|
||||
expected_row = "NetIncome"
|
||||
data = self.ticker_old_fmt.get_cashflow(legacy=True, freq="quarterly")
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
self.assertIn(expected_row, data.index, "Did not find expected row in index")
|
||||
|
||||
data_cached = self.ticker.cashflow
|
||||
data_cached = self.ticker_old_fmt.get_cashflow(legacy=True, freq="quarterly")
|
||||
self.assertIs(data, data_cached, "data not cached")
|
||||
|
||||
def test_quarterly_cashflow(self):
|
||||
expected_row = "OperatingCashFlow"
|
||||
data = self.ticker.quarterly_cashflow
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
self.assertIn(expected_row, data.index, "Did not find expected row in index")
|
||||
def test_income_alt_names(self):
|
||||
i1 = self.ticker.income_stmt
|
||||
i2 = self.ticker.incomestmt
|
||||
self.assertTrue(i1.equals(i2))
|
||||
i3 = self.ticker.financials
|
||||
self.assertTrue(i1.equals(i3))
|
||||
|
||||
data_cached = self.ticker.quarterly_cashflow
|
||||
self.assertIs(data, data_cached, "data not cached")
|
||||
i1 = self.ticker.get_income_stmt()
|
||||
i2 = self.ticker.get_incomestmt()
|
||||
self.assertTrue(i1.equals(i2))
|
||||
i3 = self.ticker.get_financials()
|
||||
self.assertTrue(i1.equals(i3))
|
||||
|
||||
i1 = self.ticker.quarterly_income_stmt
|
||||
i2 = self.ticker.quarterly_incomestmt
|
||||
self.assertTrue(i1.equals(i2))
|
||||
i3 = self.ticker.quarterly_financials
|
||||
self.assertTrue(i1.equals(i3))
|
||||
|
||||
i1 = self.ticker.get_income_stmt(freq="quarterly")
|
||||
i2 = self.ticker.get_incomestmt(freq="quarterly")
|
||||
self.assertTrue(i1.equals(i2))
|
||||
i3 = self.ticker.get_financials(freq="quarterly")
|
||||
self.assertTrue(i1.equals(i3))
|
||||
|
||||
def test_balance_sheet_alt_names(self):
|
||||
i1 = self.ticker.balance_sheet
|
||||
i2 = self.ticker.balancesheet
|
||||
self.assertTrue(i1.equals(i2))
|
||||
|
||||
i1 = self.ticker.get_balance_sheet()
|
||||
i2 = self.ticker.get_balancesheet()
|
||||
self.assertTrue(i1.equals(i2))
|
||||
|
||||
i1 = self.ticker.quarterly_balance_sheet
|
||||
i2 = self.ticker.quarterly_balancesheet
|
||||
self.assertTrue(i1.equals(i2))
|
||||
|
||||
i1 = self.ticker.get_balance_sheet(freq="quarterly")
|
||||
i2 = self.ticker.get_balancesheet(freq="quarterly")
|
||||
self.assertTrue(i1.equals(i2))
|
||||
|
||||
def test_cash_flow_alt_names(self):
|
||||
i1 = self.ticker.cash_flow
|
||||
i2 = self.ticker.cashflow
|
||||
self.assertTrue(i1.equals(i2))
|
||||
|
||||
i1 = self.ticker.get_cash_flow()
|
||||
i2 = self.ticker.get_cashflow()
|
||||
self.assertTrue(i1.equals(i2))
|
||||
|
||||
i1 = self.ticker.quarterly_cash_flow
|
||||
i2 = self.ticker.quarterly_cashflow
|
||||
self.assertTrue(i1.equals(i2))
|
||||
|
||||
i1 = self.ticker.get_cash_flow(freq="quarterly")
|
||||
i2 = self.ticker.get_cashflow(freq="quarterly")
|
||||
self.assertTrue(i1.equals(i2))
|
||||
|
||||
def test_sustainability(self):
|
||||
data = self.ticker.sustainability
|
||||
@@ -286,13 +660,158 @@ class TestTickerMiscFinancials(unittest.TestCase):
|
||||
self.assertIsInstance(data, tuple, "data has wrong type")
|
||||
self.assertTrue(len(data) > 1, "data is empty")
|
||||
|
||||
def test_shares(self):
|
||||
data = self.ticker.shares
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
|
||||
def test_shares_full(self):
|
||||
data = self.ticker.get_shares_full()
|
||||
self.assertIsInstance(data, pd.Series, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
|
||||
def test_bad_freq_value_raises_exception(self):
|
||||
self.assertRaises(ValueError, lambda: self.ticker.get_cashflow(freq="badarg"))
|
||||
|
||||
|
||||
class TestTickerInfo(unittest.TestCase):
|
||||
session = None
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
cls.session = requests_cache.CachedSession(backend='memory')
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
if cls.session is not None:
|
||||
cls.session.close()
|
||||
|
||||
def setUp(self):
|
||||
self.symbols = []
|
||||
self.symbols += ["ESLT.TA", "BP.L", "GOOGL"]
|
||||
self.symbols.append("QCSTIX") # good for testing, doesn't trade
|
||||
self.symbols += ["BTC-USD", "IWO", "VFINX", "^GSPC"]
|
||||
self.symbols += ["SOKE.IS", "ADS.DE"] # detected bugs
|
||||
self.tickers = [yf.Ticker(s, session=self.session) for s in self.symbols]
|
||||
|
||||
def tearDown(self):
|
||||
self.ticker = None
|
||||
|
||||
def test_info(self):
|
||||
data = self.tickers[0].info
|
||||
self.assertIsInstance(data, dict, "data has wrong type")
|
||||
self.assertIn("symbol", data.keys(), "Did not find expected key in info dict")
|
||||
self.assertEqual(self.symbols[0], data["symbol"], "Wrong symbol value in info dict")
|
||||
|
||||
def test_fast_info(self):
|
||||
yf.scrapers.quote.PRUNE_INFO = False
|
||||
|
||||
fast_info_keys = set()
|
||||
for ticker in self.tickers:
|
||||
fast_info_keys.update(set(ticker.fast_info.keys()))
|
||||
fast_info_keys = sorted(list(fast_info_keys))
|
||||
|
||||
key_rename_map = {}
|
||||
key_rename_map["currency"] = "currency"
|
||||
key_rename_map["quote_type"] = "quoteType"
|
||||
key_rename_map["timezone"] = "exchangeTimezoneName"
|
||||
|
||||
key_rename_map["last_price"] = ["currentPrice", "regularMarketPrice"]
|
||||
key_rename_map["open"] = ["open", "regularMarketOpen"]
|
||||
key_rename_map["day_high"] = ["dayHigh", "regularMarketDayHigh"]
|
||||
key_rename_map["day_low"] = ["dayLow", "regularMarketDayLow"]
|
||||
key_rename_map["previous_close"] = ["previousClose"]
|
||||
key_rename_map["regular_market_previous_close"] = ["regularMarketPreviousClose"]
|
||||
|
||||
key_rename_map["fifty_day_average"] = "fiftyDayAverage"
|
||||
key_rename_map["two_hundred_day_average"] = "twoHundredDayAverage"
|
||||
key_rename_map["year_change"] = ["52WeekChange", "fiftyTwoWeekChange"]
|
||||
key_rename_map["year_high"] = "fiftyTwoWeekHigh"
|
||||
key_rename_map["year_low"] = "fiftyTwoWeekLow"
|
||||
|
||||
key_rename_map["last_volume"] = ["volume", "regularMarketVolume"]
|
||||
key_rename_map["ten_day_average_volume"] = ["averageVolume10days", "averageDailyVolume10Day"]
|
||||
key_rename_map["three_month_average_volume"] = "averageVolume"
|
||||
|
||||
key_rename_map["market_cap"] = "marketCap"
|
||||
key_rename_map["shares"] = "sharesOutstanding"
|
||||
|
||||
for k in list(key_rename_map.keys()):
|
||||
if '_' in k:
|
||||
key_rename_map[yf.utils.snake_case_2_camelCase(k)] = key_rename_map[k]
|
||||
|
||||
# Note: share count items in info[] are bad. Sometimes the float > outstanding!
|
||||
# So often fast_info["shares"] does not match.
|
||||
# Why isn't fast_info["shares"] wrong? Because using it to calculate market cap always correct.
|
||||
bad_keys = {"shares"}
|
||||
|
||||
# Loose tolerance for averages, no idea why don't match info[]. Is info wrong?
|
||||
custom_tolerances = {}
|
||||
custom_tolerances["year_change"] = 1.0
|
||||
# custom_tolerances["ten_day_average_volume"] = 1e-3
|
||||
custom_tolerances["ten_day_average_volume"] = 1e-1
|
||||
# custom_tolerances["three_month_average_volume"] = 1e-2
|
||||
custom_tolerances["three_month_average_volume"] = 5e-1
|
||||
custom_tolerances["fifty_day_average"] = 1e-2
|
||||
custom_tolerances["two_hundred_day_average"] = 1e-2
|
||||
for k in list(custom_tolerances.keys()):
|
||||
if '_' in k:
|
||||
custom_tolerances[yf.utils.snake_case_2_camelCase(k)] = custom_tolerances[k]
|
||||
|
||||
for k in fast_info_keys:
|
||||
if k in key_rename_map:
|
||||
k2 = key_rename_map[k]
|
||||
else:
|
||||
k2 = k
|
||||
|
||||
if not isinstance(k2, list):
|
||||
k2 = [k2]
|
||||
|
||||
for m in k2:
|
||||
for ticker in self.tickers:
|
||||
if not m in ticker.info:
|
||||
# print(f"symbol={ticker.ticker}: fast_info key '{k}' mapped to info key '{m}' but not present in info")
|
||||
continue
|
||||
|
||||
if k in bad_keys:
|
||||
continue
|
||||
|
||||
if k in custom_tolerances:
|
||||
rtol = custom_tolerances[k]
|
||||
else:
|
||||
rtol = 5e-3
|
||||
# rtol = 1e-4
|
||||
|
||||
correct = ticker.info[m]
|
||||
test = ticker.fast_info[k]
|
||||
# print(f"Testing: symbol={ticker.ticker} m={m} k={k}: test={test} vs correct={correct}")
|
||||
if k in ["market_cap","marketCap"] and ticker.fast_info["currency"] in ["GBp", "ILA"]:
|
||||
# Adjust for currency to match Yahoo:
|
||||
test *= 0.01
|
||||
try:
|
||||
if correct is None:
|
||||
self.assertTrue(test is None or (not np.isnan(test)), f"{k}: {test} must be None or real value because correct={correct}")
|
||||
elif isinstance(test, float) or isinstance(correct, int):
|
||||
self.assertTrue(np.isclose(test, correct, rtol=rtol), f"{ticker.ticker} {k}: {test} != {correct}")
|
||||
else:
|
||||
self.assertEqual(test, correct, f"{k}: {test} != {correct}")
|
||||
except:
|
||||
if k in ["regularMarketPreviousClose"] and ticker.ticker in ["ADS.DE"]:
|
||||
# Yahoo is wrong, is returning post-market close not regular
|
||||
continue
|
||||
else:
|
||||
raise
|
||||
|
||||
|
||||
|
||||
def suite():
|
||||
suite = unittest.TestSuite()
|
||||
suite.addTest(TestTicker('Test ticker'))
|
||||
suite.addTest(TestTickerEarnings('Test earnings'))
|
||||
suite.addTest(TestTickerHolders('Test holders'))
|
||||
suite.addTest(TestTickerHistory('Test Ticker history'))
|
||||
suite.addTest(TestTickerMiscFinancials('Test misc financials'))
|
||||
suite.addTest(TestTickerInfo('Test info & fast_info'))
|
||||
return suite
|
||||
|
||||
|
||||
|
||||
1987
yfinance/base.py
1987
yfinance/base.py
File diff suppressed because it is too large
Load Diff
331
yfinance/data.py
331
yfinance/data.py
@@ -1,10 +1,20 @@
|
||||
import datetime
|
||||
import functools
|
||||
from functools import lru_cache
|
||||
|
||||
import pandas as pd
|
||||
import hashlib
|
||||
from base64 import b64decode
|
||||
usePycryptodome = False # slightly faster
|
||||
# usePycryptodome = True
|
||||
if usePycryptodome:
|
||||
from Crypto.Cipher import AES
|
||||
from Crypto.Util.Padding import unpad
|
||||
else:
|
||||
from cryptography.hazmat.primitives import padding
|
||||
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
|
||||
|
||||
import requests as requests
|
||||
import re
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
from frozendict import frozendict
|
||||
|
||||
@@ -18,14 +28,16 @@ cache_maxsize = 64
|
||||
|
||||
def lru_cache_freezeargs(func):
|
||||
"""
|
||||
Decorator transforms mutable dictionary arguments into immutable
|
||||
Needed so lru_cache can cache method calls what has dict arguments.
|
||||
Decorator transforms mutable dictionary and list arguments into immutable types
|
||||
Needed so lru_cache can cache method calls what has dict or list arguments.
|
||||
"""
|
||||
|
||||
@functools.wraps(func)
|
||||
def wrapped(*args, **kwargs):
|
||||
args = tuple([frozendict(arg) if isinstance(arg, dict) else arg for arg in args])
|
||||
kwargs = {k: frozendict(v) if isinstance(v, dict) else v for k, v in kwargs.items()}
|
||||
args = tuple([tuple(arg) if isinstance(arg, list) else arg for arg in args])
|
||||
kwargs = {k: tuple(v) if isinstance(v, list) else v for k, v in kwargs.items()}
|
||||
return func(*args, **kwargs)
|
||||
|
||||
# copy over the lru_cache extra methods to this wrapper to be able to access them
|
||||
@@ -35,6 +47,127 @@ def lru_cache_freezeargs(func):
|
||||
return wrapped
|
||||
|
||||
|
||||
def _extract_extra_keys_from_stores(data):
|
||||
new_keys = [k for k in data.keys() if k not in ["context", "plugins"]]
|
||||
new_keys_values = set([data[k] for k in new_keys])
|
||||
|
||||
# Maybe multiple keys have same value - keep one of each
|
||||
new_keys_uniq = []
|
||||
new_keys_uniq_values = set()
|
||||
for k in new_keys:
|
||||
v = data[k]
|
||||
if not v in new_keys_uniq_values:
|
||||
new_keys_uniq.append(k)
|
||||
new_keys_uniq_values.add(v)
|
||||
|
||||
return [data[k] for k in new_keys_uniq]
|
||||
|
||||
|
||||
def decrypt_cryptojs_aes_stores(data, keys=None):
|
||||
encrypted_stores = data['context']['dispatcher']['stores']
|
||||
|
||||
password = None
|
||||
if keys is not None:
|
||||
if not isinstance(keys, list):
|
||||
raise TypeError("'keys' must be list")
|
||||
candidate_passwords = keys
|
||||
else:
|
||||
candidate_passwords = []
|
||||
|
||||
if "_cs" in data and "_cr" in data:
|
||||
_cs = data["_cs"]
|
||||
_cr = data["_cr"]
|
||||
_cr = b"".join(int.to_bytes(i, length=4, byteorder="big", signed=True) for i in json.loads(_cr)["words"])
|
||||
password = hashlib.pbkdf2_hmac("sha1", _cs.encode("utf8"), _cr, 1, dklen=32).hex()
|
||||
|
||||
encrypted_stores = b64decode(encrypted_stores)
|
||||
assert encrypted_stores[0:8] == b"Salted__"
|
||||
salt = encrypted_stores[8:16]
|
||||
encrypted_stores = encrypted_stores[16:]
|
||||
|
||||
def _EVPKDF(password, salt, keySize=32, ivSize=16, iterations=1, hashAlgorithm="md5") -> tuple:
|
||||
"""OpenSSL EVP Key Derivation Function
|
||||
Args:
|
||||
password (Union[str, bytes, bytearray]): Password to generate key from.
|
||||
salt (Union[bytes, bytearray]): Salt to use.
|
||||
keySize (int, optional): Output key length in bytes. Defaults to 32.
|
||||
ivSize (int, optional): Output Initialization Vector (IV) length in bytes. Defaults to 16.
|
||||
iterations (int, optional): Number of iterations to perform. Defaults to 1.
|
||||
hashAlgorithm (str, optional): Hash algorithm to use for the KDF. Defaults to 'md5'.
|
||||
Returns:
|
||||
key, iv: Derived key and Initialization Vector (IV) bytes.
|
||||
|
||||
Taken from: https://gist.github.com/rafiibrahim8/0cd0f8c46896cafef6486cb1a50a16d3
|
||||
OpenSSL original code: https://github.com/openssl/openssl/blob/master/crypto/evp/evp_key.c#L78
|
||||
"""
|
||||
|
||||
assert iterations > 0, "Iterations can not be less than 1."
|
||||
|
||||
if isinstance(password, str):
|
||||
password = password.encode("utf-8")
|
||||
|
||||
final_length = keySize + ivSize
|
||||
key_iv = b""
|
||||
block = None
|
||||
|
||||
while len(key_iv) < final_length:
|
||||
hasher = hashlib.new(hashAlgorithm)
|
||||
if block:
|
||||
hasher.update(block)
|
||||
hasher.update(password)
|
||||
hasher.update(salt)
|
||||
block = hasher.digest()
|
||||
for _ in range(1, iterations):
|
||||
block = hashlib.new(hashAlgorithm, block).digest()
|
||||
key_iv += block
|
||||
|
||||
key, iv = key_iv[:keySize], key_iv[keySize:final_length]
|
||||
return key, iv
|
||||
|
||||
def _decrypt(encrypted_stores, password, key, iv):
|
||||
if usePycryptodome:
|
||||
cipher = AES.new(key, AES.MODE_CBC, iv=iv)
|
||||
plaintext = cipher.decrypt(encrypted_stores)
|
||||
plaintext = unpad(plaintext, 16, style="pkcs7")
|
||||
else:
|
||||
cipher = Cipher(algorithms.AES(key), modes.CBC(iv))
|
||||
decryptor = cipher.decryptor()
|
||||
plaintext = decryptor.update(encrypted_stores) + decryptor.finalize()
|
||||
unpadder = padding.PKCS7(128).unpadder()
|
||||
plaintext = unpadder.update(plaintext) + unpadder.finalize()
|
||||
plaintext = plaintext.decode("utf-8")
|
||||
return plaintext
|
||||
|
||||
if not password is None:
|
||||
try:
|
||||
key, iv = _EVPKDF(password, salt, keySize=32, ivSize=16, iterations=1, hashAlgorithm="md5")
|
||||
except:
|
||||
raise Exception("yfinance failed to decrypt Yahoo data response")
|
||||
plaintext = _decrypt(encrypted_stores, password, key, iv)
|
||||
else:
|
||||
success = False
|
||||
for i in range(len(candidate_passwords)):
|
||||
# print(f"Trying candiate pw {i+1}/{len(candidate_passwords)}")
|
||||
password = candidate_passwords[i]
|
||||
try:
|
||||
key, iv = _EVPKDF(password, salt, keySize=32, ivSize=16, iterations=1, hashAlgorithm="md5")
|
||||
|
||||
plaintext = _decrypt(encrypted_stores, password, key, iv)
|
||||
|
||||
success = True
|
||||
break
|
||||
except:
|
||||
pass
|
||||
if not success:
|
||||
raise Exception("yfinance failed to decrypt Yahoo data response")
|
||||
|
||||
decoded_stores = json.loads(plaintext)
|
||||
return decoded_stores
|
||||
|
||||
|
||||
_SCRAPE_URL_ = 'https://finance.yahoo.com/quote'
|
||||
|
||||
|
||||
class TickerData:
|
||||
"""
|
||||
Have one place to retrieve data from Yahoo API in order to ease caching and speed up operations
|
||||
@@ -43,11 +176,9 @@ class TickerData:
|
||||
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
|
||||
|
||||
def __init__(self, ticker: str, session=None):
|
||||
self._ticker = ticker
|
||||
self.ticker = ticker
|
||||
self._session = session or requests
|
||||
|
||||
@lru_cache_freezeargs
|
||||
@lru_cache(maxsize=cache_maxsize)
|
||||
def get(self, url, user_agent_headers=None, params=None, proxy=None, timeout=30):
|
||||
proxy = self._get_proxy(proxy)
|
||||
response = self._session.get(
|
||||
@@ -58,6 +189,11 @@ class TickerData:
|
||||
headers=user_agent_headers or self.user_agent_headers)
|
||||
return response
|
||||
|
||||
@lru_cache_freezeargs
|
||||
@lru_cache(maxsize=cache_maxsize)
|
||||
def cache_get(self, url, user_agent_headers=None, params=None, proxy=None, timeout=30):
|
||||
return self.get(url, user_agent_headers, params, proxy, timeout)
|
||||
|
||||
def _get_proxy(self, proxy):
|
||||
# setup proxy in requests format
|
||||
if proxy is not None:
|
||||
@@ -66,88 +202,123 @@ class TickerData:
|
||||
proxy = {"https": proxy}
|
||||
return proxy
|
||||
|
||||
def _get_decryption_keys_from_yahoo_js(self, soup):
|
||||
result = None
|
||||
|
||||
key_count = 4
|
||||
re_script = soup.find("script", string=re.compile("root.App.main")).text
|
||||
re_data = json.loads(re.search("root.App.main\s+=\s+(\{.*\})", re_script).group(1))
|
||||
re_data.pop("context", None)
|
||||
key_list = list(re_data.keys())
|
||||
if re_data.get("plugins"): # 1) attempt to get last 4 keys after plugins
|
||||
ind = key_list.index("plugins")
|
||||
if len(key_list) > ind+1:
|
||||
sub_keys = key_list[ind+1:]
|
||||
if len(sub_keys) == key_count:
|
||||
re_obj = {}
|
||||
missing_val = False
|
||||
for k in sub_keys:
|
||||
if not re_data.get(k):
|
||||
missing_val = True
|
||||
break
|
||||
re_obj.update({k: re_data.get(k)})
|
||||
if not missing_val:
|
||||
result = re_obj
|
||||
|
||||
if not result is None:
|
||||
return [''.join(result.values())]
|
||||
|
||||
re_keys = [] # 2) attempt scan main.js file approach to get keys
|
||||
prefix = "https://s.yimg.com/uc/finance/dd-site/js/main."
|
||||
tags = [tag['src'] for tag in soup.find_all('script') if prefix in tag.get('src', '')]
|
||||
for t in tags:
|
||||
response_js = self.cache_get(t)
|
||||
#
|
||||
if response_js.status_code != 200:
|
||||
time.sleep(random.randrange(10, 20))
|
||||
response_js.close()
|
||||
else:
|
||||
r_data = response_js.content.decode("utf8")
|
||||
re_list = [
|
||||
x.group() for x in re.finditer(r"context.dispatcher.stores=JSON.parse((?:.*?\r?\n?)*)toString", r_data)
|
||||
]
|
||||
for rl in re_list:
|
||||
re_sublist = [x.group() for x in re.finditer(r"t\[\"((?:.*?\r?\n?)*)\"\]", rl)]
|
||||
if len(re_sublist) == key_count:
|
||||
re_keys = [sl.replace('t["', '').replace('"]', '') for sl in re_sublist]
|
||||
break
|
||||
response_js.close()
|
||||
if len(re_keys) == key_count:
|
||||
break
|
||||
if len(re_keys) > 0:
|
||||
re_obj = {}
|
||||
missing_val = False
|
||||
for k in re_keys:
|
||||
if not re_data.get(k):
|
||||
missing_val = True
|
||||
break
|
||||
re_obj.update({k: re_data.get(k)})
|
||||
if not missing_val:
|
||||
return [''.join(re_obj.values())]
|
||||
|
||||
return []
|
||||
|
||||
@lru_cache_freezeargs
|
||||
@lru_cache(maxsize=cache_maxsize)
|
||||
def get_json_data_stores(self, url, proxy=None):
|
||||
def get_json_data_stores(self, sub_page: str = None, proxy=None) -> dict:
|
||||
'''
|
||||
get_json_data_stores returns a python dictionary of the data stores in yahoo finance web page.
|
||||
'''
|
||||
html = self.get(url=url, proxy=proxy).text
|
||||
if sub_page:
|
||||
ticker_url = "{}/{}/{}".format(_SCRAPE_URL_, self.ticker, sub_page)
|
||||
else:
|
||||
ticker_url = "{}/{}".format(_SCRAPE_URL_, self.ticker)
|
||||
|
||||
json_str = html.split('root.App.main =')[1].split(
|
||||
'(this)')[0].split(';\n}')[0].strip()
|
||||
data = json.loads(json_str)['context']['dispatcher']['stores']
|
||||
response = self.get(url=ticker_url, proxy=proxy)
|
||||
html = response.text
|
||||
|
||||
# The actual json-data for stores is in a javascript assignment in the webpage
|
||||
try:
|
||||
json_str = html.split('root.App.main =')[1].split(
|
||||
'(this)')[0].split(';\n}')[0].strip()
|
||||
except IndexError:
|
||||
# Fetch failed, probably because Yahoo spam triggered
|
||||
return {}
|
||||
|
||||
data = json.loads(json_str)
|
||||
|
||||
# Gather decryption keys:
|
||||
soup = BeautifulSoup(response.content, "html.parser")
|
||||
keys = self._get_decryption_keys_from_yahoo_js(soup)
|
||||
if len(keys) == 0:
|
||||
msg = "No decryption keys could be extracted from JS file."
|
||||
if "requests_cache" in str(type(response)):
|
||||
msg += " Try flushing your 'requests_cache', probably parsing old JS."
|
||||
print("WARNING: " + msg + " Falling back to backup decrypt methods.")
|
||||
if len(keys) == 0:
|
||||
keys = []
|
||||
try:
|
||||
extra_keys = _extract_extra_keys_from_stores(data)
|
||||
keys = [''.join(extra_keys[-4:])]
|
||||
except:
|
||||
pass
|
||||
#
|
||||
keys_url = "https://github.com/ranaroussi/yfinance/raw/main/yfinance/scrapers/yahoo-keys.txt"
|
||||
response_gh = self.cache_get(keys_url)
|
||||
keys += response_gh.text.splitlines()
|
||||
|
||||
# Decrypt!
|
||||
stores = decrypt_cryptojs_aes_stores(data, keys)
|
||||
if stores is None:
|
||||
# Maybe Yahoo returned old format, not encrypted
|
||||
if "context" in data and "dispatcher" in data["context"]:
|
||||
stores = data['context']['dispatcher']['stores']
|
||||
if stores is None:
|
||||
raise Exception(f"{self.ticker}: Failed to extract data stores from web request")
|
||||
|
||||
# return data
|
||||
new_data = json.dumps(data).replace('{}', 'null')
|
||||
new_data = json.dumps(stores).replace('{}', 'null')
|
||||
new_data = re.sub(
|
||||
r'{[\'|\"]raw[\'|\"]:(.*?),(.*?)}', r'\1', new_data)
|
||||
|
||||
return json.loads(new_data)
|
||||
|
||||
# Note cant use lru_cache as financials_data is a nested dict (freezeargs only handle flat dicts)
|
||||
def get_financials_time_series(self, timescale, financials_data, proxy=None):
|
||||
|
||||
acceptable_timestamps = ["annual", "quarterly"]
|
||||
if timescale not in acceptable_timestamps:
|
||||
raise Exception("timescale '{}' must be one of: {}".format(timescale, acceptable_timestamps))
|
||||
|
||||
# Step 1: get the keys:
|
||||
def _finditem1(key, obj):
|
||||
values = []
|
||||
if isinstance(obj, dict):
|
||||
if key in obj.keys():
|
||||
values.append(obj[key])
|
||||
for k, v in obj.items():
|
||||
values += _finditem1(key, v)
|
||||
elif isinstance(obj, list):
|
||||
for v in obj:
|
||||
values += _finditem1(key, v)
|
||||
return values
|
||||
|
||||
keys = _finditem1("key", financials_data['FinancialTemplateStore'])
|
||||
|
||||
# Step 2: construct url:
|
||||
ts_url_base = "https://query2.finance.yahoo.com/ws/fundamentals-timeseries/v1/finance/timeseries/{0}?symbol={0}".format(
|
||||
self._ticker)
|
||||
if len(keys) == 0:
|
||||
raise Exception("Fetching keys failed")
|
||||
url = ts_url_base + "&type=" + ",".join([timescale + k for k in keys])
|
||||
# Yahoo returns maximum 4 years or 5 quarters, regardless of start_dt:
|
||||
start_dt = datetime.datetime(2016, 12, 31)
|
||||
end = (datetime.datetime.now() + datetime.timedelta(days=366))
|
||||
url += "&period1={}&period2={}".format(int(start_dt.timestamp()), int(end.timestamp()))
|
||||
|
||||
# Step 3: fetch and reshape data
|
||||
json_str = self.get(url=url, proxy=proxy).text
|
||||
json_data = json.loads(json_str)
|
||||
data_raw = json_data["timeseries"]["result"]
|
||||
# data_raw = [v for v in data_raw if len(v) > 1] # Discard keys with no data
|
||||
for d in data_raw:
|
||||
del d["meta"]
|
||||
|
||||
# Now reshape data into a table:
|
||||
# Step 1: get columns and index:
|
||||
timestamps = set()
|
||||
data_unpacked = {}
|
||||
for x in data_raw:
|
||||
for k in x.keys():
|
||||
if k == "timestamp":
|
||||
timestamps.update(x[k])
|
||||
else:
|
||||
data_unpacked[k] = x[k]
|
||||
timestamps = sorted(list(timestamps))
|
||||
dates = pd.to_datetime(timestamps, unit="s")
|
||||
df = pd.DataFrame(columns=dates, index=list(data_unpacked.keys()))
|
||||
for k, v in data_unpacked.items():
|
||||
if df is None:
|
||||
df = pd.DataFrame(columns=dates, index=[k])
|
||||
df.loc[k] = {pd.Timestamp(x["asOfDate"]): x["reportedValue"]["raw"] for x in v}
|
||||
|
||||
df.index = df.index.str.replace("^" + timescale, "", regex=True)
|
||||
|
||||
# Reorder table to match order on Yahoo website
|
||||
df = df.reindex([k for k in keys if k in df.index])
|
||||
df = df[sorted(df.columns, reverse=True)]
|
||||
|
||||
return df
|
||||
|
||||
6
yfinance/exceptions.py
Normal file
6
yfinance/exceptions.py
Normal file
@@ -0,0 +1,6 @@
|
||||
class YFinanceException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class YFinanceDataException(YFinanceException):
|
||||
pass
|
||||
@@ -29,7 +29,7 @@ from . import Ticker, utils
|
||||
from . import shared
|
||||
|
||||
|
||||
def download(tickers, start=None, end=None, actions=False, threads=True, ignore_tz=True,
|
||||
def download(tickers, start=None, end=None, actions=False, threads=True, ignore_tz=None,
|
||||
group_by='column', auto_adjust=False, back_adjust=False, repair=False, keepna=False,
|
||||
progress=True, period="max", show_errors=True, interval="1d", prepost=False,
|
||||
proxy=None, rounding=False, timeout=10):
|
||||
@@ -68,7 +68,7 @@ def download(tickers, start=None, end=None, actions=False, threads=True, ignore_
|
||||
How many threads to use for mass downloading. Default is True
|
||||
ignore_tz: bool
|
||||
When combining from different timezones, ignore that part of datetime.
|
||||
Default is True
|
||||
Default depends on interval. Intraday = False. Day+ = True.
|
||||
proxy: str
|
||||
Optional. Proxy server URL scheme. Default is None
|
||||
rounding: bool
|
||||
@@ -80,6 +80,14 @@ def download(tickers, start=None, end=None, actions=False, threads=True, ignore_
|
||||
seconds. (Can also be a fraction of a second e.g. 0.01)
|
||||
"""
|
||||
|
||||
if ignore_tz is None:
|
||||
# Set default value depending on interval
|
||||
if interval[1:] in ['m', 'h']:
|
||||
# Intraday
|
||||
ignore_tz = False
|
||||
else:
|
||||
ignore_tz = True
|
||||
|
||||
# create ticker list
|
||||
tickers = tickers if isinstance(
|
||||
tickers, (list, set, tuple)) else tickers.replace(',', ' ').split()
|
||||
@@ -199,10 +207,16 @@ def _download_one_threaded(ticker, start=None, end=None,
|
||||
actions=False, progress=True, period="max",
|
||||
interval="1d", prepost=False, proxy=None,
|
||||
keepna=False, rounding=False, timeout=10):
|
||||
data = _download_one(ticker, start, end, auto_adjust, back_adjust, repair,
|
||||
actions, period, interval, prepost, proxy, rounding,
|
||||
keepna, timeout)
|
||||
shared._DFS[ticker.upper()] = data
|
||||
try:
|
||||
data = _download_one(ticker, start, end, auto_adjust, back_adjust, repair,
|
||||
actions, period, interval, prepost, proxy, rounding,
|
||||
keepna, timeout)
|
||||
except Exception as e:
|
||||
# glob try/except needed as current thead implementation breaks if exception is raised.
|
||||
shared._DFS[ticker] = utils.empty_df()
|
||||
shared._ERRORS[ticker] = repr(e)
|
||||
else:
|
||||
shared._DFS[ticker.upper()] = data
|
||||
if progress:
|
||||
shared._PROGRESS_BAR.animate()
|
||||
|
||||
|
||||
0
yfinance/scrapers/__init__.py
Normal file
0
yfinance/scrapers/__init__.py
Normal file
118
yfinance/scrapers/analysis.py
Normal file
118
yfinance/scrapers/analysis.py
Normal file
@@ -0,0 +1,118 @@
|
||||
import pandas as pd
|
||||
|
||||
from yfinance import utils
|
||||
from yfinance.data import TickerData
|
||||
|
||||
|
||||
class Analysis:
|
||||
|
||||
def __init__(self, data: TickerData, proxy=None):
|
||||
self._data = data
|
||||
self.proxy = proxy
|
||||
|
||||
self._earnings_trend = None
|
||||
self._analyst_trend_details = None
|
||||
self._analyst_price_target = None
|
||||
self._rev_est = None
|
||||
self._eps_est = None
|
||||
self._already_scraped = False
|
||||
|
||||
@property
|
||||
def earnings_trend(self) -> pd.DataFrame:
|
||||
if self._earnings_trend is None:
|
||||
self._scrape(self.proxy)
|
||||
return self._earnings_trend
|
||||
|
||||
@property
|
||||
def analyst_trend_details(self) -> pd.DataFrame:
|
||||
if self._analyst_trend_details is None:
|
||||
self._scrape(self.proxy)
|
||||
return self._analyst_trend_details
|
||||
|
||||
@property
|
||||
def analyst_price_target(self) -> pd.DataFrame:
|
||||
if self._analyst_price_target is None:
|
||||
self._scrape(self.proxy)
|
||||
return self._analyst_price_target
|
||||
|
||||
@property
|
||||
def rev_est(self) -> pd.DataFrame:
|
||||
if self._rev_est is None:
|
||||
self._scrape(self.proxy)
|
||||
return self._rev_est
|
||||
|
||||
@property
|
||||
def eps_est(self) -> pd.DataFrame:
|
||||
if self._eps_est is None:
|
||||
self._scrape(self.proxy)
|
||||
return self._eps_est
|
||||
|
||||
def _scrape(self, proxy):
|
||||
if self._already_scraped:
|
||||
return
|
||||
self._already_scraped = True
|
||||
|
||||
# Analysis Data/Analyst Forecasts
|
||||
analysis_data = self._data.get_json_data_stores("analysis", proxy=proxy)
|
||||
try:
|
||||
analysis_data = analysis_data['QuoteSummaryStore']
|
||||
except KeyError as e:
|
||||
err_msg = "No analysis data found, symbol may be delisted"
|
||||
print('- %s: %s' % (self._data.ticker, err_msg))
|
||||
return
|
||||
|
||||
if isinstance(analysis_data.get('earningsTrend'), dict):
|
||||
try:
|
||||
analysis = pd.DataFrame(analysis_data['earningsTrend']['trend'])
|
||||
analysis['endDate'] = pd.to_datetime(analysis['endDate'])
|
||||
analysis.set_index('period', inplace=True)
|
||||
analysis.index = analysis.index.str.upper()
|
||||
analysis.index.name = 'Period'
|
||||
analysis.columns = utils.camel2title(analysis.columns)
|
||||
|
||||
dict_cols = []
|
||||
|
||||
for idx, row in analysis.iterrows():
|
||||
for colname, colval in row.items():
|
||||
if isinstance(colval, dict):
|
||||
dict_cols.append(colname)
|
||||
for k, v in colval.items():
|
||||
new_colname = colname + ' ' + \
|
||||
utils.camel2title([k])[0]
|
||||
analysis.loc[idx, new_colname] = v
|
||||
|
||||
self._earnings_trend = analysis[[
|
||||
c for c in analysis.columns if c not in dict_cols]]
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
self._analyst_trend_details = pd.DataFrame(analysis_data['recommendationTrend']['trend'])
|
||||
except Exception as e:
|
||||
self._analyst_trend_details = None
|
||||
try:
|
||||
self._analyst_price_target = pd.DataFrame(analysis_data['financialData'], index=[0])[
|
||||
['targetLowPrice', 'currentPrice', 'targetMeanPrice', 'targetHighPrice', 'numberOfAnalystOpinions']].T
|
||||
except Exception as e:
|
||||
self._analyst_price_target = None
|
||||
earnings_estimate = []
|
||||
revenue_estimate = []
|
||||
if self._analyst_trend_details is not None :
|
||||
for key in analysis_data['earningsTrend']['trend']:
|
||||
try:
|
||||
earnings_dict = key['earningsEstimate']
|
||||
earnings_dict['period'] = key['period']
|
||||
earnings_dict['endDate'] = key['endDate']
|
||||
earnings_estimate.append(earnings_dict)
|
||||
|
||||
revenue_dict = key['revenueEstimate']
|
||||
revenue_dict['period'] = key['period']
|
||||
revenue_dict['endDate'] = key['endDate']
|
||||
revenue_estimate.append(revenue_dict)
|
||||
except Exception as e:
|
||||
pass
|
||||
self._rev_est = pd.DataFrame(revenue_estimate)
|
||||
self._eps_est = pd.DataFrame(earnings_estimate)
|
||||
else:
|
||||
self._rev_est = pd.DataFrame()
|
||||
self._eps_est = pd.DataFrame()
|
||||
319
yfinance/scrapers/fundamentals.py
Normal file
319
yfinance/scrapers/fundamentals.py
Normal file
@@ -0,0 +1,319 @@
|
||||
import datetime
|
||||
import json
|
||||
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
|
||||
from yfinance import utils
|
||||
from yfinance.data import TickerData
|
||||
from yfinance.exceptions import YFinanceDataException, YFinanceException
|
||||
|
||||
|
||||
class Fundamentals:
|
||||
|
||||
def __init__(self, data: TickerData, proxy=None):
|
||||
self._data = data
|
||||
self.proxy = proxy
|
||||
|
||||
self._earnings = None
|
||||
self._financials = None
|
||||
self._shares = None
|
||||
|
||||
self._financials_data = None
|
||||
self._fin_data_quote = None
|
||||
self._basics_already_scraped = False
|
||||
self._financials = Financials(data)
|
||||
|
||||
@property
|
||||
def financials(self) -> "Financials":
|
||||
return self._financials
|
||||
|
||||
@property
|
||||
def earnings(self) -> dict:
|
||||
if self._earnings is None:
|
||||
self._scrape_earnings(self.proxy)
|
||||
return self._earnings
|
||||
|
||||
@property
|
||||
def shares(self) -> pd.DataFrame:
|
||||
if self._shares is None:
|
||||
self._scrape_shares(self.proxy)
|
||||
return self._shares
|
||||
|
||||
def _scrape_basics(self, proxy):
|
||||
if self._basics_already_scraped:
|
||||
return
|
||||
self._basics_already_scraped = True
|
||||
|
||||
self._financials_data = self._data.get_json_data_stores('financials', proxy)
|
||||
try:
|
||||
self._fin_data_quote = self._financials_data['QuoteSummaryStore']
|
||||
except KeyError:
|
||||
err_msg = "No financials data found, symbol may be delisted"
|
||||
print('- %s: %s' % (self._data.ticker, err_msg))
|
||||
return None
|
||||
|
||||
def _scrape_earnings(self, proxy):
|
||||
self._scrape_basics(proxy)
|
||||
# earnings
|
||||
self._earnings = {"yearly": pd.DataFrame(), "quarterly": pd.DataFrame()}
|
||||
if self._fin_data_quote is None:
|
||||
return
|
||||
if isinstance(self._fin_data_quote.get('earnings'), dict):
|
||||
try:
|
||||
earnings = self._fin_data_quote['earnings']['financialsChart']
|
||||
earnings['financialCurrency'] = self._fin_data_quote['earnings'].get('financialCurrency', 'USD')
|
||||
self._earnings['financialCurrency'] = earnings['financialCurrency']
|
||||
df = pd.DataFrame(earnings['yearly']).set_index('date')
|
||||
df.columns = utils.camel2title(df.columns)
|
||||
df.index.name = 'Year'
|
||||
self._earnings['yearly'] = df
|
||||
|
||||
df = pd.DataFrame(earnings['quarterly']).set_index('date')
|
||||
df.columns = utils.camel2title(df.columns)
|
||||
df.index.name = 'Quarter'
|
||||
self._earnings['quarterly'] = df
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def _scrape_shares(self, proxy):
|
||||
self._scrape_basics(proxy)
|
||||
# shares outstanding
|
||||
try:
|
||||
# keep only years with non None data
|
||||
available_shares = [shares_data for shares_data in
|
||||
self._financials_data['QuoteTimeSeriesStore']['timeSeries']['annualBasicAverageShares']
|
||||
if
|
||||
shares_data]
|
||||
shares = pd.DataFrame(available_shares)
|
||||
shares['Year'] = shares['asOfDate'].agg(lambda x: int(x[:4]))
|
||||
shares.set_index('Year', inplace=True)
|
||||
shares.drop(columns=['dataId', 'asOfDate',
|
||||
'periodType', 'currencyCode'], inplace=True)
|
||||
shares.rename(
|
||||
columns={'reportedValue': "BasicShares"}, inplace=True)
|
||||
self._shares = shares
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
class Financials:
|
||||
def __init__(self, data: TickerData):
|
||||
self._data = data
|
||||
self._income_time_series = {}
|
||||
self._balance_sheet_time_series = {}
|
||||
self._cash_flow_time_series = {}
|
||||
self._income_scraped = {}
|
||||
self._balance_sheet_scraped = {}
|
||||
self._cash_flow_scraped = {}
|
||||
|
||||
def get_income_time_series(self, freq="yearly", proxy=None) -> pd.DataFrame:
|
||||
res = self._income_time_series
|
||||
if freq not in res:
|
||||
res[freq] = self._fetch_time_series("income", freq, proxy=None)
|
||||
return res[freq]
|
||||
|
||||
def get_balance_sheet_time_series(self, freq="yearly", proxy=None) -> pd.DataFrame:
|
||||
res = self._balance_sheet_time_series
|
||||
if freq not in res:
|
||||
res[freq] = self._fetch_time_series("balance-sheet", freq, proxy=None)
|
||||
return res[freq]
|
||||
|
||||
def get_cash_flow_time_series(self, freq="yearly", proxy=None) -> pd.DataFrame:
|
||||
res = self._cash_flow_time_series
|
||||
if freq not in res:
|
||||
res[freq] = self._fetch_time_series("cash-flow", freq, proxy=None)
|
||||
return res[freq]
|
||||
|
||||
def _fetch_time_series(self, name, timescale, proxy=None):
|
||||
# Fetching time series preferred over scraping 'QuoteSummaryStore',
|
||||
# because it matches what Yahoo shows. But for some tickers returns nothing,
|
||||
# despite 'QuoteSummaryStore' containing valid data.
|
||||
|
||||
allowed_names = ["income", "balance-sheet", "cash-flow"]
|
||||
allowed_timescales = ["yearly", "quarterly"]
|
||||
|
||||
if name not in allowed_names:
|
||||
raise ValueError("Illegal argument: name must be one of: {}".format(allowed_names))
|
||||
if timescale not in allowed_timescales:
|
||||
raise ValueError("Illegal argument: timescale must be one of: {}".format(allowed_names))
|
||||
|
||||
try:
|
||||
statement = self._create_financials_table(name, timescale, proxy)
|
||||
|
||||
if statement is not None:
|
||||
return statement
|
||||
except YFinanceException as e:
|
||||
print(f"- {self._data.ticker}: Failed to create {name} financials table for reason: {repr(e)}")
|
||||
return pd.DataFrame()
|
||||
|
||||
def _create_financials_table(self, name, timescale, proxy):
|
||||
if name == "income":
|
||||
# Yahoo stores the 'income' table internally under 'financials' key
|
||||
name = "financials"
|
||||
|
||||
keys = self._get_datastore_keys(name, proxy)
|
||||
try:
|
||||
return self.get_financials_time_series(timescale, keys, proxy)
|
||||
except Exception as e:
|
||||
pass
|
||||
|
||||
def _get_datastore_keys(self, sub_page, proxy) -> list:
|
||||
data_stores = self._data.get_json_data_stores(sub_page, proxy)
|
||||
|
||||
# Step 1: get the keys:
|
||||
def _finditem1(key, obj):
|
||||
values = []
|
||||
if isinstance(obj, dict):
|
||||
if key in obj.keys():
|
||||
values.append(obj[key])
|
||||
for k, v in obj.items():
|
||||
values += _finditem1(key, v)
|
||||
elif isinstance(obj, list):
|
||||
for v in obj:
|
||||
values += _finditem1(key, v)
|
||||
return values
|
||||
|
||||
try:
|
||||
keys = _finditem1("key", data_stores['FinancialTemplateStore'])
|
||||
except KeyError as e:
|
||||
raise YFinanceDataException("Parsing FinancialTemplateStore failed, reason: {}".format(repr(e)))
|
||||
|
||||
if not keys:
|
||||
raise YFinanceDataException("No keys in FinancialTemplateStore")
|
||||
return keys
|
||||
|
||||
def get_financials_time_series(self, timescale, keys: list, proxy=None) -> pd.DataFrame:
|
||||
timescale_translation = {"yearly": "annual", "quarterly": "quarterly"}
|
||||
timescale = timescale_translation[timescale]
|
||||
|
||||
# Step 2: construct url:
|
||||
ts_url_base = \
|
||||
"https://query2.finance.yahoo.com/ws/fundamentals-timeseries/v1/finance/timeseries/{0}?symbol={0}" \
|
||||
.format(self._data.ticker)
|
||||
|
||||
url = ts_url_base + "&type=" + ",".join([timescale + k for k in keys])
|
||||
# Yahoo returns maximum 4 years or 5 quarters, regardless of start_dt:
|
||||
start_dt = datetime.datetime(2016, 12, 31)
|
||||
end = pd.Timestamp.utcnow().ceil("D")
|
||||
url += "&period1={}&period2={}".format(int(start_dt.timestamp()), int(end.timestamp()))
|
||||
|
||||
# Step 3: fetch and reshape data
|
||||
json_str = self._data.cache_get(url=url, proxy=proxy).text
|
||||
json_data = json.loads(json_str)
|
||||
data_raw = json_data["timeseries"]["result"]
|
||||
# data_raw = [v for v in data_raw if len(v) > 1] # Discard keys with no data
|
||||
for d in data_raw:
|
||||
del d["meta"]
|
||||
|
||||
# Now reshape data into a table:
|
||||
# Step 1: get columns and index:
|
||||
timestamps = set()
|
||||
data_unpacked = {}
|
||||
for x in data_raw:
|
||||
for k in x.keys():
|
||||
if k == "timestamp":
|
||||
timestamps.update(x[k])
|
||||
else:
|
||||
data_unpacked[k] = x[k]
|
||||
timestamps = sorted(list(timestamps))
|
||||
dates = pd.to_datetime(timestamps, unit="s")
|
||||
df = pd.DataFrame(columns=dates, index=list(data_unpacked.keys()))
|
||||
for k, v in data_unpacked.items():
|
||||
if df is None:
|
||||
df = pd.DataFrame(columns=dates, index=[k])
|
||||
df.loc[k] = {pd.Timestamp(x["asOfDate"]): x["reportedValue"]["raw"] for x in v}
|
||||
|
||||
df.index = df.index.str.replace("^" + timescale, "", regex=True)
|
||||
|
||||
# Reorder table to match order on Yahoo website
|
||||
df = df.reindex([k for k in keys if k in df.index])
|
||||
df = df[sorted(df.columns, reverse=True)]
|
||||
|
||||
return df
|
||||
|
||||
def get_income_scrape(self, freq="yearly", proxy=None) -> pd.DataFrame:
|
||||
res = self._income_scraped
|
||||
if freq not in res:
|
||||
res[freq] = self._scrape("income", freq, proxy=None)
|
||||
return res[freq]
|
||||
|
||||
def get_balance_sheet_scrape(self, freq="yearly", proxy=None) -> pd.DataFrame:
|
||||
res = self._balance_sheet_scraped
|
||||
if freq not in res:
|
||||
res[freq] = self._scrape("balance-sheet", freq, proxy=None)
|
||||
return res[freq]
|
||||
|
||||
def get_cash_flow_scrape(self, freq="yearly", proxy=None) -> pd.DataFrame:
|
||||
res = self._cash_flow_scraped
|
||||
if freq not in res:
|
||||
res[freq] = self._scrape("cash-flow", freq, proxy=None)
|
||||
return res[freq]
|
||||
|
||||
def _scrape(self, name, timescale, proxy=None):
|
||||
# Backup in case _fetch_time_series() fails to return data
|
||||
|
||||
allowed_names = ["income", "balance-sheet", "cash-flow"]
|
||||
allowed_timescales = ["yearly", "quarterly"]
|
||||
|
||||
if name not in allowed_names:
|
||||
raise ValueError("Illegal argument: name must be one of: {}".format(allowed_names))
|
||||
if timescale not in allowed_timescales:
|
||||
raise ValueError("Illegal argument: timescale must be one of: {}".format(allowed_names))
|
||||
|
||||
try:
|
||||
statement = self._create_financials_table_old(name, timescale, proxy)
|
||||
|
||||
if statement is not None:
|
||||
return statement
|
||||
except YFinanceException as e:
|
||||
print(f"- {self._data.ticker}: Failed to create financials table for {name} reason: {repr(e)}")
|
||||
return pd.DataFrame()
|
||||
|
||||
def _create_financials_table_old(self, name, timescale, proxy):
|
||||
data_stores = self._data.get_json_data_stores("financials", proxy)
|
||||
|
||||
# Fetch raw data
|
||||
if not "QuoteSummaryStore" in data_stores:
|
||||
raise YFinanceDataException(f"Yahoo not returning legacy financials data")
|
||||
data = data_stores["QuoteSummaryStore"]
|
||||
|
||||
if name == "cash-flow":
|
||||
key1 = "cashflowStatement"
|
||||
key2 = "cashflowStatements"
|
||||
elif name == "balance-sheet":
|
||||
key1 = "balanceSheet"
|
||||
key2 = "balanceSheetStatements"
|
||||
else:
|
||||
key1 = "incomeStatement"
|
||||
key2 = "incomeStatementHistory"
|
||||
key1 += "History"
|
||||
if timescale == "quarterly":
|
||||
key1 += "Quarterly"
|
||||
if key1 not in data or data[key1] is None or key2 not in data[key1]:
|
||||
raise YFinanceDataException(f"Yahoo not returning legacy {name} financials data")
|
||||
data = data[key1][key2]
|
||||
|
||||
# Tabulate
|
||||
df = pd.DataFrame(data)
|
||||
if len(df) == 0:
|
||||
raise YFinanceDataException(f"Yahoo not returning legacy {name} financials data")
|
||||
df = df.drop(columns=['maxAge'])
|
||||
for col in df.columns:
|
||||
df[col] = df[col].replace('-', np.nan)
|
||||
df.set_index('endDate', inplace=True)
|
||||
try:
|
||||
df.index = pd.to_datetime(df.index, unit='s')
|
||||
except ValueError:
|
||||
df.index = pd.to_datetime(df.index)
|
||||
df = df.T
|
||||
df.columns.name = ''
|
||||
df.index.name = 'Breakdown'
|
||||
# rename incorrect yahoo key
|
||||
df.rename(index={'treasuryStock': 'gainsLossesNotAffectingRetainedEarnings'}, inplace=True)
|
||||
|
||||
# Upper-case first letter, leave rest unchanged:
|
||||
s0 = df.index[0]
|
||||
df.index = [s[0].upper()+s[1:] for s in df.index]
|
||||
|
||||
return df
|
||||
66
yfinance/scrapers/holders.py
Normal file
66
yfinance/scrapers/holders.py
Normal file
@@ -0,0 +1,66 @@
|
||||
import pandas as pd
|
||||
|
||||
from yfinance.data import TickerData
|
||||
|
||||
class Holders:
|
||||
_SCRAPE_URL_ = 'https://finance.yahoo.com/quote'
|
||||
|
||||
def __init__(self, data: TickerData, proxy=None):
|
||||
self._data = data
|
||||
self.proxy = proxy
|
||||
|
||||
self._major = None
|
||||
self._institutional = None
|
||||
self._mutualfund = None
|
||||
|
||||
@property
|
||||
def major(self) -> pd.DataFrame:
|
||||
if self._major is None:
|
||||
self._scrape(self.proxy)
|
||||
return self._major
|
||||
|
||||
@property
|
||||
def institutional(self) -> pd.DataFrame:
|
||||
if self._institutional is None:
|
||||
self._scrape(self.proxy)
|
||||
return self._institutional
|
||||
|
||||
@property
|
||||
def mutualfund(self) -> pd.DataFrame:
|
||||
if self._mutualfund is None:
|
||||
self._scrape(self.proxy)
|
||||
return self._mutualfund
|
||||
|
||||
def _scrape(self, proxy):
|
||||
ticker_url = "{}/{}".format(self._SCRAPE_URL_, self._data.ticker)
|
||||
try:
|
||||
resp = self._data.cache_get(ticker_url + '/holders', proxy)
|
||||
holders = pd.read_html(resp.text)
|
||||
except Exception:
|
||||
holders = []
|
||||
|
||||
if len(holders) >= 3:
|
||||
self._major = holders[0]
|
||||
self._institutional = holders[1]
|
||||
self._mutualfund = holders[2]
|
||||
elif len(holders) >= 2:
|
||||
self._major = holders[0]
|
||||
self._institutional = holders[1]
|
||||
elif len(holders) >= 1:
|
||||
self._major = holders[0]
|
||||
|
||||
if self._institutional is not None:
|
||||
if 'Date Reported' in self._institutional:
|
||||
self._institutional['Date Reported'] = pd.to_datetime(
|
||||
self._institutional['Date Reported'])
|
||||
if '% Out' in self._institutional:
|
||||
self._institutional['% Out'] = self._institutional[
|
||||
'% Out'].str.replace('%', '').astype(float) / 100
|
||||
|
||||
if self._mutualfund is not None:
|
||||
if 'Date Reported' in self._mutualfund:
|
||||
self._mutualfund['Date Reported'] = pd.to_datetime(
|
||||
self._mutualfund['Date Reported'])
|
||||
if '% Out' in self._mutualfund:
|
||||
self._mutualfund['% Out'] = self._mutualfund[
|
||||
'% Out'].str.replace('%', '').astype(float) / 100
|
||||
296
yfinance/scrapers/quote.py
Normal file
296
yfinance/scrapers/quote.py
Normal file
@@ -0,0 +1,296 @@
|
||||
import datetime
|
||||
import json
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from yfinance import utils
|
||||
from yfinance.data import TickerData
|
||||
|
||||
|
||||
info_retired_keys_price = {"currentPrice", "dayHigh", "dayLow", "open", "previousClose", "volume", "volume24Hr"}
|
||||
info_retired_keys_price.update({"regularMarket"+s for s in ["DayHigh", "DayLow", "Open", "PreviousClose", "Price", "Volume"]})
|
||||
info_retired_keys_price.update({"fiftyTwoWeekLow", "fiftyTwoWeekHigh", "fiftyTwoWeekChange", "52WeekChange", "fiftyDayAverage", "twoHundredDayAverage"})
|
||||
info_retired_keys_price.update({"averageDailyVolume10Day", "averageVolume10days", "averageVolume"})
|
||||
info_retired_keys_exchange = {"currency", "exchange", "exchangeTimezoneName", "exchangeTimezoneShortName", "quoteType"}
|
||||
info_retired_keys_marketCap = {"marketCap"}
|
||||
info_retired_keys_symbol = {"symbol"}
|
||||
info_retired_keys = info_retired_keys_price | info_retired_keys_exchange | info_retired_keys_marketCap | info_retired_keys_symbol
|
||||
|
||||
|
||||
PRUNE_INFO = True
|
||||
# PRUNE_INFO = False
|
||||
|
||||
|
||||
from collections.abc import MutableMapping
|
||||
class InfoDictWrapper(MutableMapping):
|
||||
""" Simple wrapper around info dict, intercepting 'gets' to
|
||||
print how-to-migrate messages for specific keys. Requires
|
||||
override dict API"""
|
||||
|
||||
def __init__(self, info):
|
||||
self.info = info
|
||||
|
||||
def keys(self):
|
||||
return self.info.keys()
|
||||
|
||||
def __str__(self):
|
||||
return self.info.__str__()
|
||||
|
||||
def __repr__(self):
|
||||
return self.info.__repr__()
|
||||
|
||||
def __contains__(self, k):
|
||||
return k in self.info.keys()
|
||||
|
||||
def __getitem__(self, k):
|
||||
if k in info_retired_keys_price:
|
||||
print(f"Price data removed from info (key='{k}'). Use Ticker.fast_info or history() instead")
|
||||
return None
|
||||
elif k in info_retired_keys_exchange:
|
||||
print(f"Exchange data removed from info (key='{k}'). Use Ticker.fast_info or Ticker.get_history_metadata() instead")
|
||||
return None
|
||||
elif k in info_retired_keys_marketCap:
|
||||
print(f"Market cap removed from info (key='{k}'). Use Ticker.fast_info instead")
|
||||
return None
|
||||
elif k in info_retired_keys_symbol:
|
||||
print(f"Symbol removed from info (key='{k}'). You know this already")
|
||||
return None
|
||||
return self.info[self._keytransform(k)]
|
||||
|
||||
def __setitem__(self, k, value):
|
||||
self.info[self._keytransform(k)] = value
|
||||
|
||||
def __delitem__(self, k):
|
||||
del self.info[self._keytransform(k)]
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self.info)
|
||||
|
||||
def __len__(self):
|
||||
return len(self.info)
|
||||
|
||||
def _keytransform(self, k):
|
||||
return k
|
||||
|
||||
|
||||
|
||||
class Quote:
|
||||
|
||||
def __init__(self, data: TickerData, proxy=None):
|
||||
self._data = data
|
||||
self.proxy = proxy
|
||||
|
||||
self._info = None
|
||||
self._retired_info = None
|
||||
self._sustainability = None
|
||||
self._recommendations = None
|
||||
self._calendar = None
|
||||
|
||||
self._already_scraped = False
|
||||
self._already_scraped_complementary = False
|
||||
|
||||
@property
|
||||
def info(self) -> dict:
|
||||
if self._info is None:
|
||||
self._scrape(self.proxy)
|
||||
self._scrape_complementary(self.proxy)
|
||||
|
||||
return self._info
|
||||
|
||||
@property
|
||||
def sustainability(self) -> pd.DataFrame:
|
||||
if self._sustainability is None:
|
||||
self._scrape(self.proxy)
|
||||
return self._sustainability
|
||||
|
||||
@property
|
||||
def recommendations(self) -> pd.DataFrame:
|
||||
if self._recommendations is None:
|
||||
self._scrape(self.proxy)
|
||||
return self._recommendations
|
||||
|
||||
@property
|
||||
def calendar(self) -> pd.DataFrame:
|
||||
if self._calendar is None:
|
||||
self._scrape(self.proxy)
|
||||
return self._calendar
|
||||
|
||||
def _scrape(self, proxy):
|
||||
if self._already_scraped:
|
||||
return
|
||||
self._already_scraped = True
|
||||
|
||||
# get info and sustainability
|
||||
json_data = self._data.get_json_data_stores(proxy=proxy)
|
||||
try:
|
||||
quote_summary_store = json_data['QuoteSummaryStore']
|
||||
except KeyError:
|
||||
err_msg = "No summary info found, symbol may be delisted"
|
||||
print('- %s: %s' % (self._data.ticker, err_msg))
|
||||
return None
|
||||
|
||||
# sustainability
|
||||
d = {}
|
||||
try:
|
||||
if isinstance(quote_summary_store.get('esgScores'), dict):
|
||||
for item in quote_summary_store['esgScores']:
|
||||
if not isinstance(quote_summary_store['esgScores'][item], (dict, list)):
|
||||
d[item] = quote_summary_store['esgScores'][item]
|
||||
|
||||
s = pd.DataFrame(index=[0], data=d)[-1:].T
|
||||
s.columns = ['Value']
|
||||
s.index.name = '%.f-%.f' % (
|
||||
s[s.index == 'ratingYear']['Value'].values[0],
|
||||
s[s.index == 'ratingMonth']['Value'].values[0])
|
||||
|
||||
self._sustainability = s[~s.index.isin(
|
||||
['maxAge', 'ratingYear', 'ratingMonth'])]
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
self._info = {}
|
||||
try:
|
||||
items = ['summaryProfile', 'financialData', 'quoteType',
|
||||
'defaultKeyStatistics', 'assetProfile', 'summaryDetail']
|
||||
for item in items:
|
||||
if isinstance(quote_summary_store.get(item), dict):
|
||||
self._info.update(quote_summary_store[item])
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# For ETFs, provide this valuable data: the top holdings of the ETF
|
||||
try:
|
||||
if 'topHoldings' in quote_summary_store:
|
||||
self._info.update(quote_summary_store['topHoldings'])
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
if not isinstance(quote_summary_store.get('summaryDetail'), dict):
|
||||
# For some reason summaryDetail did not give any results. The price dict
|
||||
# usually has most of the same info
|
||||
self._info.update(quote_summary_store.get('price', {}))
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
# self._info['regularMarketPrice'] = self._info['regularMarketOpen']
|
||||
self._info['regularMarketPrice'] = quote_summary_store.get('price', {}).get(
|
||||
'regularMarketPrice', self._info.get('regularMarketOpen', None))
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
self._info['preMarketPrice'] = quote_summary_store.get('price', {}).get(
|
||||
'preMarketPrice', self._info.get('preMarketPrice', None))
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
self._info['logo_url'] = ""
|
||||
try:
|
||||
if not 'website' in self._info:
|
||||
self._info['logo_url'] = 'https://logo.clearbit.com/%s.com' % \
|
||||
self._info['shortName'].split(' ')[0].split(',')[0]
|
||||
else:
|
||||
domain = self._info['website'].split(
|
||||
'://')[1].split('/')[0].replace('www.', '')
|
||||
self._info['logo_url'] = 'https://logo.clearbit.com/%s' % domain
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Delete redundant info[] keys, because values can be accessed faster
|
||||
# elsewhere - e.g. price keys. Hope is reduces Yahoo spam effect.
|
||||
# But record the dropped keys, because in rare cases they are needed.
|
||||
self._retired_info = {}
|
||||
for k in info_retired_keys:
|
||||
if k in self._info:
|
||||
self._retired_info[k] = self._info[k]
|
||||
if PRUNE_INFO:
|
||||
del self._info[k]
|
||||
if PRUNE_INFO:
|
||||
# InfoDictWrapper will explain how to access above data elsewhere
|
||||
self._info = InfoDictWrapper(self._info)
|
||||
|
||||
# events
|
||||
try:
|
||||
cal = pd.DataFrame(quote_summary_store['calendarEvents']['earnings'])
|
||||
cal['earningsDate'] = pd.to_datetime(
|
||||
cal['earningsDate'], unit='s')
|
||||
self._calendar = cal.T
|
||||
self._calendar.index = utils.camel2title(self._calendar.index)
|
||||
self._calendar.columns = ['Value']
|
||||
except Exception as e:
|
||||
pass
|
||||
|
||||
# analyst recommendations
|
||||
try:
|
||||
rec = pd.DataFrame(
|
||||
quote_summary_store['upgradeDowngradeHistory']['history'])
|
||||
rec['earningsDate'] = pd.to_datetime(
|
||||
rec['epochGradeDate'], unit='s')
|
||||
rec.set_index('earningsDate', inplace=True)
|
||||
rec.index.name = 'Date'
|
||||
rec.columns = utils.camel2title(rec.columns)
|
||||
self._recommendations = rec[[
|
||||
'Firm', 'To Grade', 'From Grade', 'Action']].sort_index()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def _scrape_complementary(self, proxy):
|
||||
if self._already_scraped_complementary:
|
||||
return
|
||||
self._already_scraped_complementary = True
|
||||
|
||||
self._scrape(proxy)
|
||||
if self._info is None:
|
||||
return
|
||||
|
||||
# Complementary key-statistics. For now just want 'trailing PEG ratio'
|
||||
keys = {"trailingPegRatio"}
|
||||
if keys:
|
||||
# Simplified the original scrape code for key-statistics. Very expensive for fetching
|
||||
# just one value, best if scraping most/all:
|
||||
#
|
||||
# p = _re.compile(r'root\.App\.main = (.*);')
|
||||
# url = 'https://finance.yahoo.com/quote/{}/key-statistics?p={}'.format(self._ticker.ticker, self._ticker.ticker)
|
||||
# try:
|
||||
# r = session.get(url, headers=utils.user_agent_headers)
|
||||
# data = _json.loads(p.findall(r.text)[0])
|
||||
# key_stats = data['context']['dispatcher']['stores']['QuoteTimeSeriesStore']["timeSeries"]
|
||||
# for k in keys:
|
||||
# if k not in key_stats or len(key_stats[k])==0:
|
||||
# # Yahoo website prints N/A, indicates Yahoo lacks necessary data to calculate
|
||||
# v = None
|
||||
# else:
|
||||
# # Select most recent (last) raw value in list:
|
||||
# v = key_stats[k][-1]["reportedValue"]["raw"]
|
||||
# self._info[k] = v
|
||||
# except Exception:
|
||||
# raise
|
||||
# pass
|
||||
#
|
||||
# For just one/few variable is faster to query directly:
|
||||
url = "https://query1.finance.yahoo.com/ws/fundamentals-timeseries/v1/finance/timeseries/{}?symbol={}".format(
|
||||
self._data.ticker, self._data.ticker)
|
||||
for k in keys:
|
||||
url += "&type=" + k
|
||||
# Request 6 months of data
|
||||
start = pd.Timestamp.utcnow().floor("D") - datetime.timedelta(days=365 // 2)
|
||||
start = int(start.timestamp())
|
||||
end = pd.Timestamp.utcnow().ceil("D")
|
||||
end = int(end.timestamp())
|
||||
url += f"&period1={start}&period2={end}"
|
||||
|
||||
json_str = self._data.cache_get(url=url, proxy=proxy).text
|
||||
json_data = json.loads(json_str)
|
||||
try:
|
||||
key_stats = json_data["timeseries"]["result"][0]
|
||||
if k not in key_stats:
|
||||
# Yahoo website prints N/A, indicates Yahoo lacks necessary data to calculate
|
||||
v = None
|
||||
else:
|
||||
# Select most recent (last) raw value in list:
|
||||
v = key_stats[k][-1]["reportedValue"]["raw"]
|
||||
except Exception:
|
||||
v = None
|
||||
self._info[k] = v
|
||||
7
yfinance/scrapers/yahoo-keys.txt
Normal file
7
yfinance/scrapers/yahoo-keys.txt
Normal file
@@ -0,0 +1,7 @@
|
||||
daf93e37cbf219cd4c1f3f74ec4551265ec5565b99e8c9322dccd6872941cf13c818cbb88cba6f530e643b4e2329b17ec7161f4502ce6a02bb0dbbe5fc0d0474
|
||||
ad4d90b3c9f2e1d156ef98eadfa0ff93e4042f6960e54aa2a13f06f528e6b50ba4265a26a1fd5b9cd3db0d268a9c34e1d080592424309429a58bce4adc893c87
|
||||
e9a8ab8e5620b712ebc2fb4f33d5c8b9c80c0d07e8c371911c785cf674789f1747d76a909510158a7b7419e86857f2d7abbd777813ff64840e4cbc514d12bcae
|
||||
6ae2523aeafa283dad746556540145bf603f44edbf37ad404d3766a8420bb5eb1d3738f52a227b88283cca9cae44060d5f0bba84b6a495082589f5fe7acbdc9e
|
||||
3365117c2a368ffa5df7313a4a84988f73926a86358e8eea9497c5ff799ce27d104b68e5f2fbffa6f8f92c1fef41765a7066fa6bcf050810a9c4c7872fd3ebf0
|
||||
15d8f57919857d5a5358d2082c7ef0f1129cfacd2a6480333dcfb954b7bb67d820abefebfdb0eaa6ef18a1c57f617b67d7e7b0ec040403b889630ae5db5a4dbb
|
||||
db9630d707a7d0953ac795cd8db1ca9ca6c9d8239197cdfda24b4e0ec9c37eaec4db82dab68b8f606ab7b5b4af3e65dab50606f8cf508269ec927e6ee605fb78
|
||||
@@ -30,6 +30,9 @@ from .base import TickerBase
|
||||
|
||||
|
||||
class Ticker(TickerBase):
|
||||
def __init__(self, ticker, session=None):
|
||||
super(Ticker, self).__init__(ticker, session=session)
|
||||
self._expirations = {}
|
||||
|
||||
def __repr__(self):
|
||||
return 'yfinance.Ticker object <%s>' % self.ticker
|
||||
@@ -99,39 +102,43 @@ class Ticker(TickerBase):
|
||||
return self.get_isin()
|
||||
|
||||
@property
|
||||
def major_holders(self):
|
||||
def major_holders(self) -> _pd.DataFrame:
|
||||
return self.get_major_holders()
|
||||
|
||||
@property
|
||||
def institutional_holders(self):
|
||||
def institutional_holders(self) -> _pd.DataFrame:
|
||||
return self.get_institutional_holders()
|
||||
|
||||
@property
|
||||
def mutualfund_holders(self):
|
||||
def mutualfund_holders(self) -> _pd.DataFrame:
|
||||
return self.get_mutualfund_holders()
|
||||
|
||||
@property
|
||||
def dividends(self):
|
||||
def dividends(self) -> _pd.Series:
|
||||
return self.get_dividends()
|
||||
|
||||
@property
|
||||
def splits(self):
|
||||
def capital_gains(self):
|
||||
return self.get_capital_gains()
|
||||
|
||||
@property
|
||||
def splits(self) -> _pd.Series:
|
||||
return self.get_splits()
|
||||
|
||||
@property
|
||||
def actions(self):
|
||||
def actions(self) -> _pd.DataFrame:
|
||||
return self.get_actions()
|
||||
|
||||
@property
|
||||
def shares(self):
|
||||
def shares(self) -> _pd.DataFrame :
|
||||
return self.get_shares()
|
||||
|
||||
@property
|
||||
def info(self):
|
||||
def info(self) -> dict:
|
||||
return self.get_info()
|
||||
|
||||
@property
|
||||
def calendar(self):
|
||||
def calendar(self) -> _pd.DataFrame:
|
||||
return self.get_calendar()
|
||||
|
||||
@property
|
||||
@@ -139,63 +146,87 @@ class Ticker(TickerBase):
|
||||
return self.get_recommendations()
|
||||
|
||||
@property
|
||||
def earnings(self):
|
||||
def earnings(self) -> _pd.DataFrame:
|
||||
return self.get_earnings()
|
||||
|
||||
@property
|
||||
def quarterly_earnings(self):
|
||||
def quarterly_earnings(self) -> _pd.DataFrame:
|
||||
return self.get_earnings(freq='quarterly')
|
||||
|
||||
@property
|
||||
def income_stmt(self):
|
||||
return self.get_income_stmt()
|
||||
def income_stmt(self) -> _pd.DataFrame:
|
||||
return self.get_income_stmt(pretty=True)
|
||||
|
||||
@property
|
||||
def quarterly_income_stmt(self):
|
||||
return self.get_income_stmt(freq='quarterly')
|
||||
def quarterly_income_stmt(self) -> _pd.DataFrame:
|
||||
return self.get_income_stmt(pretty=True, freq='quarterly')
|
||||
|
||||
@property
|
||||
def balance_sheet(self):
|
||||
return self.get_balance_sheet()
|
||||
def incomestmt(self) -> _pd.DataFrame:
|
||||
return self.income_stmt
|
||||
|
||||
@property
|
||||
def quarterly_balance_sheet(self):
|
||||
return self.get_balance_sheet(freq='quarterly')
|
||||
def quarterly_incomestmt(self) -> _pd.DataFrame:
|
||||
return self.quarterly_income_stmt
|
||||
|
||||
@property
|
||||
def balancesheet(self):
|
||||
def financials(self) -> _pd.DataFrame:
|
||||
return self.income_stmt
|
||||
|
||||
@property
|
||||
def quarterly_financials(self) -> _pd.DataFrame:
|
||||
return self.quarterly_income_stmt
|
||||
|
||||
@property
|
||||
def balance_sheet(self) -> _pd.DataFrame:
|
||||
return self.get_balance_sheet(pretty=True)
|
||||
|
||||
@property
|
||||
def quarterly_balance_sheet(self) -> _pd.DataFrame:
|
||||
return self.get_balance_sheet(pretty=True, freq='quarterly')
|
||||
|
||||
@property
|
||||
def balancesheet(self) -> _pd.DataFrame:
|
||||
return self.balance_sheet
|
||||
|
||||
@property
|
||||
def quarterly_balancesheet(self):
|
||||
def quarterly_balancesheet(self) -> _pd.DataFrame:
|
||||
return self.quarterly_balance_sheet
|
||||
|
||||
@property
|
||||
def cashflow(self):
|
||||
return self.get_cashflow(freq="yearly")
|
||||
def cash_flow(self) -> _pd.DataFrame:
|
||||
return self.get_cash_flow(pretty=True, freq="yearly")
|
||||
|
||||
@property
|
||||
def quarterly_cashflow(self):
|
||||
return self.get_cashflow(freq='quarterly')
|
||||
def quarterly_cash_flow(self) -> _pd.DataFrame:
|
||||
return self.get_cash_flow(pretty=True, freq='quarterly')
|
||||
|
||||
@property
|
||||
def cashflow(self) -> _pd.DataFrame:
|
||||
return self.cash_flow
|
||||
|
||||
@property
|
||||
def quarterly_cashflow(self) -> _pd.DataFrame:
|
||||
return self.quarterly_cash_flow
|
||||
|
||||
@property
|
||||
def recommendations_summary(self):
|
||||
return self.get_recommendations_summary()
|
||||
|
||||
@property
|
||||
def analyst_price_target(self):
|
||||
def analyst_price_target(self) -> _pd.DataFrame:
|
||||
return self.get_analyst_price_target()
|
||||
|
||||
@property
|
||||
def revenue_forecasts(self):
|
||||
def revenue_forecasts(self) -> _pd.DataFrame:
|
||||
return self.get_rev_forecast()
|
||||
|
||||
@property
|
||||
def sustainability(self):
|
||||
def sustainability(self) -> _pd.DataFrame:
|
||||
return self.get_sustainability()
|
||||
|
||||
@property
|
||||
def options(self):
|
||||
def options(self) -> tuple:
|
||||
if not self._expirations:
|
||||
self._download_options()
|
||||
return tuple(self._expirations.keys())
|
||||
@@ -205,17 +236,17 @@ class Ticker(TickerBase):
|
||||
return self.get_news()
|
||||
|
||||
@property
|
||||
def earnings_trend(self):
|
||||
def earnings_trend(self) -> _pd.DataFrame:
|
||||
return self.get_earnings_trend()
|
||||
|
||||
@property
|
||||
def earnings_history(self):
|
||||
return self.get_earnings_history()
|
||||
|
||||
@property
|
||||
def earnings_dates(self):
|
||||
def earnings_dates(self) -> _pd.DataFrame:
|
||||
return self.get_earnings_dates()
|
||||
|
||||
@property
|
||||
def earnings_forecasts(self):
|
||||
def earnings_forecasts(self) -> _pd.DataFrame:
|
||||
return self.get_earnings_forecast()
|
||||
|
||||
@property
|
||||
def history_metadata(self) -> dict:
|
||||
return self.get_history_metadata()
|
||||
|
||||
@@ -34,12 +34,8 @@ class Tickers:
|
||||
tickers = tickers if isinstance(
|
||||
tickers, list) else tickers.replace(',', ' ').split()
|
||||
self.symbols = [ticker.upper() for ticker in tickers]
|
||||
ticker_objects = {}
|
||||
self.tickers = {ticker:Ticker(ticker, session=session) for ticker in self.symbols}
|
||||
|
||||
for ticker in self.symbols:
|
||||
ticker_objects[ticker] = Ticker(ticker, session=session)
|
||||
|
||||
self.tickers = ticker_objects
|
||||
# self.tickers = _namedtuple(
|
||||
# "Tickers", ticker_objects.keys(), rename=True
|
||||
# )(*ticker_objects.values())
|
||||
|
||||
@@ -22,7 +22,8 @@
|
||||
from __future__ import print_function
|
||||
|
||||
import datetime as _datetime
|
||||
from typing import Dict, Union
|
||||
import dateutil as _dateutil
|
||||
from typing import Dict, Union, List, Optional
|
||||
|
||||
import pytz as _tz
|
||||
import requests as _requests
|
||||
@@ -48,6 +49,18 @@ user_agent_headers = {
|
||||
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
|
||||
|
||||
|
||||
# From https://stackoverflow.com/a/59128615
|
||||
from types import FunctionType
|
||||
from inspect import getmembers
|
||||
def attributes(obj):
|
||||
disallowed_names = {
|
||||
name for name, value in getmembers(type(obj))
|
||||
if isinstance(value, FunctionType)}
|
||||
return {
|
||||
name: getattr(obj, name) for name in dir(obj)
|
||||
if name[0] != '_' and name not in disallowed_names and hasattr(obj, name)}
|
||||
|
||||
|
||||
def is_isin(string):
|
||||
return bool(_re.match("^([A-Z]{2})([A-Z0-9]{9})([0-9]{1})$", string))
|
||||
|
||||
@@ -216,7 +229,7 @@ def format_annual_financial_statement(level_detail, annual_dicts, annual_order,
|
||||
else:
|
||||
_statement = Annual
|
||||
|
||||
_statement.index = camel2title(_statement.T)
|
||||
_statement.index = camel2title(_statement.T.index)
|
||||
_statement['level_detail'] = level_detail
|
||||
_statement = _statement.set_index([_statement.index, 'level_detail'])
|
||||
_statement = _statement[sorted(_statement.columns, reverse=True)]
|
||||
@@ -241,8 +254,55 @@ def format_quarterly_financial_statement(_statement, level_detail, order):
|
||||
return _statement
|
||||
|
||||
|
||||
def camel2title(o):
|
||||
return [_re.sub("([a-z])([A-Z])", r"\g<1> \g<2>", i).title() for i in o]
|
||||
def camel2title(strings: List[str], sep: str = ' ', acronyms: Optional[List[str]] = None) -> List[str]:
|
||||
if isinstance(strings, str) or not hasattr(strings, '__iter__'):
|
||||
raise TypeError("camel2title() 'strings' argument must be iterable of strings")
|
||||
if len(strings) == 0:
|
||||
return strings
|
||||
if not isinstance(strings[0], str):
|
||||
raise TypeError("camel2title() 'strings' argument must be iterable of strings")
|
||||
if not isinstance(sep, str) or len(sep) != 1:
|
||||
raise ValueError(f"camel2title() 'sep' argument = '{sep}' must be single character")
|
||||
if _re.match("[a-zA-Z0-9]", sep):
|
||||
raise ValueError(f"camel2title() 'sep' argument = '{sep}' cannot be alpha-numeric")
|
||||
if _re.escape(sep) != sep and sep not in {' ', '-'}:
|
||||
# Permit some exceptions, I don't understand why they get escaped
|
||||
raise ValueError(f"camel2title() 'sep' argument = '{sep}' cannot be special character")
|
||||
|
||||
if acronyms is None:
|
||||
pat = "([a-z])([A-Z])"
|
||||
rep = rf"\g<1>{sep}\g<2>"
|
||||
return [_re.sub(pat, rep, s).title() for s in strings]
|
||||
|
||||
# Handling acronyms requires more care. Assumes Yahoo returns acronym strings upper-case
|
||||
if isinstance(acronyms, str) or not hasattr(acronyms, '__iter__') or not isinstance(acronyms[0], str):
|
||||
raise TypeError("camel2title() 'acronyms' argument must be iterable of strings")
|
||||
for a in acronyms:
|
||||
if not _re.match("^[A-Z]+$", a):
|
||||
raise ValueError(f"camel2title() 'acronyms' argument must only contain upper-case, but '{a}' detected")
|
||||
|
||||
# Insert 'sep' between lower-then-upper-case
|
||||
pat = "([a-z])([A-Z])"
|
||||
rep = rf"\g<1>{sep}\g<2>"
|
||||
strings = [_re.sub(pat, rep, s) for s in strings]
|
||||
|
||||
# Insert 'sep' after acronyms
|
||||
for a in acronyms:
|
||||
pat = f"({a})([A-Z][a-z])"
|
||||
rep = rf"\g<1>{sep}\g<2>"
|
||||
strings = [_re.sub(pat, rep, s) for s in strings]
|
||||
|
||||
# Apply str.title() to non-acronym words
|
||||
strings = [s.split(sep) for s in strings]
|
||||
strings = [[j.title() if not j in acronyms else j for j in s] for s in strings]
|
||||
strings = [sep.join(s) for s in strings]
|
||||
|
||||
return strings
|
||||
|
||||
|
||||
def snake_case_2_camelCase(s):
    """Convert a snake_case string to camelCase, e.g. 'first_trade_date' -> 'firstTradeDate'."""
    parts = s.split('_')
    # First fragment keeps its original casing; the rest are title-cased and glued on.
    return parts[0] + ''.join(word.title() for word in parts[1:])
|
||||
|
||||
|
||||
def _parse_user_dt(dt, exchange_tz):
|
||||
@@ -262,7 +322,21 @@ def _parse_user_dt(dt, exchange_tz):
|
||||
return dt
|
||||
|
||||
|
||||
def _interval_to_timedelta(interval):
|
||||
if interval == "1mo":
|
||||
return _dateutil.relativedelta.relativedelta(months=1)
|
||||
elif interval == "3mo":
|
||||
return _dateutil.relativedelta.relativedelta(months=3)
|
||||
elif interval == "1y":
|
||||
return _dateutil.relativedelta.relativedelta(years=1)
|
||||
elif interval == "1wk":
|
||||
return _pd.Timedelta(days=7, unit='d')
|
||||
else:
|
||||
return _pd.Timedelta(interval)
|
||||
|
||||
|
||||
def auto_adjust(data):
|
||||
col_order = data.columns
|
||||
df = data.copy()
|
||||
ratio = df["Close"] / df["Adj Close"]
|
||||
df["Adj Open"] = df["Open"] / ratio
|
||||
@@ -278,13 +352,13 @@ def auto_adjust(data):
|
||||
"Adj Low": "Low", "Adj Close": "Close"
|
||||
}, inplace=True)
|
||||
|
||||
df = df[["Open", "High", "Low", "Close", "Volume"]]
|
||||
return df[["Open", "High", "Low", "Close", "Volume"]]
|
||||
return df[[c for c in col_order if c in df.columns]]
|
||||
|
||||
|
||||
def back_adjust(data):
|
||||
""" back-adjusted data to mimic true historical prices """
|
||||
|
||||
col_order = data.columns
|
||||
df = data.copy()
|
||||
ratio = df["Adj Close"] / df["Close"]
|
||||
df["Adj Open"] = df["Open"] * ratio
|
||||
@@ -300,7 +374,7 @@ def back_adjust(data):
|
||||
"Adj Low": "Low"
|
||||
}, inplace=True)
|
||||
|
||||
return df[["Open", "High", "Low", "Close", "Volume"]]
|
||||
return df[[c for c in col_order if c in df.columns]]
|
||||
|
||||
|
||||
def parse_quotes(data):
|
||||
@@ -332,6 +406,8 @@ def parse_quotes(data):
|
||||
def parse_actions(data):
|
||||
dividends = _pd.DataFrame(
|
||||
columns=["Dividends"], index=_pd.DatetimeIndex([]))
|
||||
capital_gains = _pd.DataFrame(
|
||||
columns=["Capital Gains"], index=_pd.DatetimeIndex([]))
|
||||
splits = _pd.DataFrame(
|
||||
columns=["Stock Splits"], index=_pd.DatetimeIndex([]))
|
||||
|
||||
@@ -342,9 +418,16 @@ def parse_actions(data):
|
||||
dividends.set_index("date", inplace=True)
|
||||
dividends.index = _pd.to_datetime(dividends.index, unit="s")
|
||||
dividends.sort_index(inplace=True)
|
||||
|
||||
dividends.columns = ["Dividends"]
|
||||
|
||||
if "capitalGains" in data["events"]:
|
||||
capital_gains = _pd.DataFrame(
|
||||
data=list(data["events"]["capitalGains"].values()))
|
||||
capital_gains.set_index("date", inplace=True)
|
||||
capital_gains.index = _pd.to_datetime(capital_gains.index, unit="s")
|
||||
capital_gains.sort_index(inplace=True)
|
||||
capital_gains.columns = ["Capital Gains"]
|
||||
|
||||
if "splits" in data["events"]:
|
||||
splits = _pd.DataFrame(
|
||||
data=list(data["events"]["splits"].values()))
|
||||
@@ -355,7 +438,7 @@ def parse_actions(data):
|
||||
splits["denominator"]
|
||||
splits = splits[["Stock Splits"]]
|
||||
|
||||
return dividends, splits
|
||||
return dividends, splits, capital_gains
|
||||
|
||||
|
||||
def set_df_tz(df, interval, tz):
|
||||
@@ -365,6 +448,35 @@ def set_df_tz(df, interval, tz):
|
||||
return df
|
||||
|
||||
|
||||
def fix_Yahoo_returning_prepost_unrequested(quotes, interval, metadata):
    """Drop pre/post-market rows that Yahoo returned despite not being requested.

    Sometimes Yahoo returns post-market data despite not requesting it
    (normally on half-day early closes), and sometimes pre-market data
    (e.g. some London tickers). Rows outside the regular session, as
    defined by metadata["tradingPeriods"] (a DataFrame with tz-aware
    'start'/'end' columns, one row per trading day), are removed.

    Returns the filtered quotes DataFrame. 'interval' is currently unused
    but kept for a consistent signature with the other fix_Yahoo_* helpers.
    """
    tps_df = metadata["tradingPeriods"]
    # Temporary '_date' key on both frames so merge() aligns each quote row
    # with its trading day's session times.
    tps_df["_date"] = tps_df.index.date
    quotes["_date"] = quotes.index.date
    idx = quotes.index.copy()
    quotes = quotes.merge(tps_df, how="left", validate="many_to_one")
    quotes.index = idx
    # "end" = end of regular trading hours (including any auction)
    f_drop = quotes.index >= quotes["end"]
    f_drop = f_drop | (quotes.index < quotes["start"])
    if f_drop.any():
        # Fix: removed dead locals (f_na, n_nna, n_drop_nna, quotes_dropped)
        # that only fed a commented-out debug print.
        quotes = quotes[~f_drop]
    metadata["tradingPeriods"] = tps_df.drop(["_date"], axis=1)
    quotes = quotes.drop(["_date", "start", "end"], axis=1)
    return quotes
|
||||
|
||||
|
||||
def fix_Yahoo_returning_live_separate(quotes, interval, tz_exchange):
|
||||
# Yahoo bug fix. If market is open today then Yahoo normally returns
|
||||
# todays data as a separate row from rest-of week/month interval in above row.
|
||||
@@ -393,7 +505,7 @@ def fix_Yahoo_returning_live_separate(quotes, interval, tz_exchange):
|
||||
elif interval == "3mo":
|
||||
last_rows_same_interval = dt1.year == dt2.year and dt1.quarter == dt2.quarter
|
||||
else:
|
||||
last_rows_same_interval = False
|
||||
last_rows_same_interval = (dt1-dt2) < _pd.Timedelta(interval)
|
||||
|
||||
if last_rows_same_interval:
|
||||
# Last two rows are within same interval
|
||||
@@ -472,7 +584,7 @@ def safe_merge_dfs(df_main, df_sub, interval):
|
||||
new_index = None
|
||||
|
||||
if new_index is not None:
|
||||
new_index = new_index.tz_localize(df.index.tz, ambiguous=True)
|
||||
new_index = new_index.tz_localize(df.index.tz, ambiguous=True, nonexistent='shift_forward')
|
||||
df_sub = _reindex_events(df_sub, new_index, data_col)
|
||||
df = df_main.join(df_sub)
|
||||
|
||||
@@ -542,13 +654,15 @@ def safe_merge_dfs(df_main, df_sub, interval):
|
||||
## Not always possible to match events with trading, e.g. when released pre-market.
|
||||
## So have to append to bottom with nan prices.
|
||||
## But should only be impossible with intra-day price data.
|
||||
if interval.endswith('m') or interval.endswith('h'):
|
||||
if interval.endswith('m') or interval.endswith('h') or interval == "1d":
|
||||
# Update: is possible with daily data when dividend very recent
|
||||
f_missing = ~df_sub.index.isin(df.index)
|
||||
df_sub_missing = df_sub[f_missing]
|
||||
df_sub_missing = df_sub[f_missing].copy()
|
||||
keys = {"Adj Open", "Open", "Adj High", "High", "Adj Low", "Low", "Adj Close",
|
||||
"Close"}.intersection(df.columns)
|
||||
df_sub_missing[list(keys)] = _np.nan
|
||||
df = _pd.concat([df, df_sub_missing], sort=True)
|
||||
col_ordering = df.columns
|
||||
df = _pd.concat([df, df_sub_missing], sort=True)[col_ordering]
|
||||
else:
|
||||
raise Exception("Lost data during merge despite all attempts to align data (see above)")
|
||||
|
||||
@@ -576,6 +690,63 @@ def is_valid_timezone(tz: str) -> bool:
|
||||
return True
|
||||
|
||||
|
||||
def format_history_metadata(md):
    """Convert raw Yahoo history metadata in-place into friendlier types.

    Epoch-second timestamps become tz-aware pandas Timestamps in the
    exchange's timezone, and "tradingPeriods" becomes a DataFrame indexed
    by day. Non-dict or empty input is returned unchanged.
    """
    if not isinstance(md, dict):
        return md
    if len(md) == 0:
        return md

    tz = md["exchangeTimezoneName"]

    for k in ["firstTradeDate", "regularMarketTime"]:
        if k in md and md[k] is not None:
            md[k] = _pd.to_datetime(md[k], unit='s', utc=True).tz_convert(tz)

    if "currentTradingPeriod" in md:
        for m in ["regular", "pre", "post"]:
            if m in md["currentTradingPeriod"]:
                for t in ["start", "end"]:
                    md["currentTradingPeriod"][m][t] = \
                        _pd.to_datetime(md["currentTradingPeriod"][m][t], unit='s', utc=True).tz_convert(tz)
                # Redundant once the timestamps carry the timezone.
                del md["currentTradingPeriod"][m]["gmtoffset"]
                del md["currentTradingPeriod"][m]["timezone"]

    if "tradingPeriods" in md:
        if md["tradingPeriods"] == {"pre": [], "post": []}:
            # Empty placeholder — no session info worth keeping.
            del md["tradingPeriods"]

    if "tradingPeriods" in md:
        tps = md["tradingPeriods"]
        if isinstance(tps, list):
            # Only regular times
            df = _pd.DataFrame.from_records(_np.hstack(tps))
            df = df.drop(["timezone", "gmtoffset"], axis=1)
            df["start"] = _pd.to_datetime(df["start"], unit='s', utc=True).dt.tz_convert(tz)
            df["end"] = _pd.to_datetime(df["end"], unit='s', utc=True).dt.tz_convert(tz)
        elif isinstance(tps, dict):
            # Includes pre- and post-market
            pre_df = _pd.DataFrame.from_records(_np.hstack(tps["pre"]))
            post_df = _pd.DataFrame.from_records(_np.hstack(tps["post"]))
            # Fix: regular sessions were being built from tps["post"]
            # (copy-paste of the line above), corrupting 'start'/'end'.
            regular_df = _pd.DataFrame.from_records(_np.hstack(tps["regular"]))

            pre_df = pre_df.rename(columns={"start": "pre_start", "end": "pre_end"}).drop(["timezone", "gmtoffset"], axis=1)
            post_df = post_df.rename(columns={"start": "post_start", "end": "post_end"}).drop(["timezone", "gmtoffset"], axis=1)
            regular_df = regular_df.drop(["timezone", "gmtoffset"], axis=1)

            df = regular_df.join(pre_df).join(post_df)
            for c in ["start", "end", "pre_start", "pre_end", "post_start", "post_end"]:
                df[c] = _pd.to_datetime(df[c], unit='s', utc=True).dt.tz_convert(tz)
        else:
            raise Exception()

        # Index by calendar day of each session's start, localized to exchange tz.
        df.index = _pd.to_datetime(df["start"].dt.date)
        df.index = df.index.tz_localize(tz)
        df.index.name = "Date"

        md["tradingPeriods"] = df

    return md
|
||||
|
||||
class ProgressBar:
|
||||
def __init__(self, iterations, text='completed'):
|
||||
self.text = text
|
||||
@@ -638,7 +809,14 @@ class _KVStore:
|
||||
with self._cache_mutex:
|
||||
self.conn = _sqlite3.connect(filename, timeout=10, check_same_thread=False)
|
||||
self.conn.execute('pragma journal_mode=wal')
|
||||
self.conn.execute('create table if not exists "kv" (key TEXT primary key, value TEXT) without rowid')
|
||||
try:
|
||||
self.conn.execute('create table if not exists "kv" (key TEXT primary key, value TEXT) without rowid')
|
||||
except Exception as e:
|
||||
if 'near "without": syntax error' in str(e):
|
||||
# "without rowid" requires sqlite 3.8.2. Older versions will raise exception
|
||||
self.conn.execute('create table if not exists "kv" (key TEXT primary key, value TEXT)')
|
||||
else:
|
||||
raise
|
||||
self.conn.commit()
|
||||
_atexit.register(self.close)
|
||||
|
||||
@@ -679,8 +857,10 @@ class _TzCache:
|
||||
"""Simple sqlite file cache of ticker->timezone"""
|
||||
|
||||
def __init__(self):
|
||||
self._tz_db = None
|
||||
self._setup_cache_folder()
|
||||
# Must init db here, where is thread-safe
|
||||
self._tz_db = _KVStore(_os.path.join(self._db_dir, "tkr-tz.db"))
|
||||
self._migrate_cache_tkr_tz()
|
||||
|
||||
def _setup_cache_folder(self):
|
||||
if not _os.path.isdir(self._db_dir):
|
||||
@@ -712,21 +892,21 @@ class _TzCache:
|
||||
|
||||
@property
|
||||
def tz_db(self):
|
||||
# lazy init
|
||||
if self._tz_db is None:
|
||||
self._tz_db = _KVStore(_os.path.join(self._db_dir, "tkr-tz.db"))
|
||||
self._migrate_cache_tkr_tz()
|
||||
|
||||
return self._tz_db
|
||||
|
||||
def _migrate_cache_tkr_tz(self):
|
||||
"""Migrate contents from old ticker CSV-cache to SQLite db"""
|
||||
fp = _os.path.join(self._db_dir, "tkr-tz.csv")
|
||||
if not _os.path.isfile(fp):
|
||||
old_cache_file_path = _os.path.join(self._db_dir, "tkr-tz.csv")
|
||||
|
||||
if not _os.path.isfile(old_cache_file_path):
|
||||
return None
|
||||
df = _pd.read_csv(fp, index_col="Ticker")
|
||||
self.tz_db.bulk_set(df.to_dict()['Tz'])
|
||||
_os.remove(fp)
|
||||
try:
|
||||
df = _pd.read_csv(old_cache_file_path, index_col="Ticker")
|
||||
except _pd.errors.EmptyDataError:
|
||||
_os.remove(old_cache_file_path)
|
||||
else:
|
||||
self.tz_db.bulk_set(df.to_dict()['Tz'])
|
||||
_os.remove(old_cache_file_path)
|
||||
|
||||
|
||||
class _TzCacheDummy:
|
||||
|
||||
@@ -1 +1 @@
|
||||
version = "0.2.0rc2"
|
||||
version = "0.2.13b1"
|
||||
|
||||
Reference in New Issue
Block a user