Compare commits
70 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1ddc38c04f | ||
|
|
fcab4ea37e | ||
|
|
d40ad6282a | ||
|
|
c95c02fdfc | ||
|
|
5fb9cab817 | ||
|
|
1c39ca4762 | ||
|
|
27868a4677 | ||
|
|
0da75493dc | ||
|
|
14c6d05854 | ||
|
|
8e369a2736 | ||
|
|
e09f77eb06 | ||
|
|
7db798e900 | ||
|
|
3b7758b3ab | ||
|
|
b729bcb1df | ||
|
|
6d7c5b6f4c | ||
|
|
45568fd765 | ||
|
|
fc020d953a | ||
|
|
bc645bb7dd | ||
|
|
74198aeed4 | ||
|
|
890922f68b | ||
|
|
44ef4a73ac | ||
|
|
c745df183a | ||
|
|
0e069dbbff | ||
|
|
25144351a4 | ||
|
|
3dd7e40df2 | ||
|
|
5bbcedfe2e | ||
|
|
fc049e1e0d | ||
|
|
00a44a8132 | ||
|
|
4ec93d90f1 | ||
|
|
1081f1e809 | ||
|
|
fed35b175f | ||
|
|
3f6a46d77d | ||
|
|
dee9a55e43 | ||
|
|
20b3b438e2 | ||
|
|
0be2ce1fd5 | ||
|
|
2d60f9ad2a | ||
|
|
2919ccb732 | ||
|
|
ffa697cea1 | ||
|
|
e3aae8a8ba | ||
|
|
4ded15740e | ||
|
|
506f792772 | ||
|
|
5a342cf8b0 | ||
|
|
52f6462d20 | ||
|
|
508afcae23 | ||
|
|
a9859e2f9f | ||
|
|
d0e59203cc | ||
|
|
b94173bddf | ||
|
|
a928eb45fd | ||
|
|
60c7477a03 | ||
|
|
42d946cee1 | ||
|
|
24958d5442 | ||
|
|
966473f7c7 | ||
|
|
ae7f371ada | ||
|
|
af7806640f | ||
|
|
64b4726c4e | ||
|
|
5adddf3e88 | ||
|
|
5bbe358ad7 | ||
|
|
38c1323313 | ||
|
|
70243657a7 | ||
|
|
e242617237 | ||
|
|
59d0974773 | ||
|
|
5bc3ec01d0 | ||
|
|
3f911745c4 | ||
|
|
f95eef6489 | ||
|
|
3ac85397cb | ||
|
|
873ab0f574 | ||
|
|
f5264649b9 | ||
|
|
90731511ed | ||
|
|
a8f0998ab8 | ||
|
|
02785d61a1 |
18
.github/workflows/ci.yml
vendored
18
.github/workflows/ci.yml
vendored
@@ -1,18 +0,0 @@
|
||||
name: ci
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- main
|
||||
jobs:
|
||||
deploy:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: 3.x
|
||||
- run: pip install -r requirements.txt
|
||||
- run: pip install mkdocstrings==0.14.0
|
||||
- run: pip install mkdocs-material
|
||||
- run: mkdocs gh-deploy --force
|
||||
35
.github/workflows/pytest.yml.disabled
vendored
Normal file
35
.github/workflows/pytest.yml.disabled
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
name: Pytest
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
- main
|
||||
- dev
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: "3.12"
|
||||
cache: 'pip'
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install -r requirements.txt pytest
|
||||
|
||||
- name: Run non-cache tests
|
||||
run: pytest tests/ --ignore tests/test_cache.py --ignore tests/test_price_repair.py
|
||||
|
||||
- name: Run cache tests
|
||||
run: |
|
||||
pytest tests/test_cache.py::TestCache
|
||||
pytest tests/test_cache.py::TestCacheNoPermission
|
||||
@@ -1,6 +1,46 @@
|
||||
Change Log
|
||||
===========
|
||||
|
||||
0.2.53
|
||||
------
|
||||
Fixes:
|
||||
- Fix: Failed to parse holders JSON data #2234
|
||||
- Fix: Bad data in Holders #2244
|
||||
- Stop CSRF-cookie-fetch fail killing yfinance #2249
|
||||
- Fix Market Docs #2250
|
||||
- Fix: Broken "See also" links in documentation #2253
|
||||
- Fix: Interval check and error message formatting in multi.py #2256
|
||||
Improve:
|
||||
- Add pre- / post-stock prices (and other useful information) #2212
|
||||
- Warn user when use download() without specifying auto_adjust #2230
|
||||
- Refactor: Earnings Dates – Switch to API Fetching #2247
|
||||
- Improve prices div repair #2260
|
||||
Maintenance:
|
||||
- Add GitHub Actions workflow and fix failing tests #2233
|
||||
|
||||
0.2.52
|
||||
------
|
||||
Features:
|
||||
- Improve Screener & docs #2207
|
||||
- Add Market summary & status #2175
|
||||
- Support custom period in Ticker.history() #2192
|
||||
- raise YfRateLimitError if rate limited #2108
|
||||
- add more options to Search #2191
|
||||
Fixes:
|
||||
- remove hardcoded keys in Analysis #2194
|
||||
- handle Yahoo changed Search response #2202
|
||||
Maintenance:
|
||||
- add optional dependencies to requirements.txt #2199
|
||||
|
||||
0.2.51
|
||||
------
|
||||
Features:
|
||||
- Screener tweaks #2168
|
||||
- Search #2160
|
||||
- get_news() expose count #2173
|
||||
Fixes:
|
||||
- earnings_dates #2169
|
||||
|
||||
0.2.50
|
||||
------
|
||||
Fixes:
|
||||
|
||||
@@ -39,6 +39,8 @@ Yahoo! finance API is intended for personal use only.**
|
||||
- `Ticker`: single ticker data
|
||||
- `Tickers`: multiple tickers' data
|
||||
- `download`: download market data for multiple tickers
|
||||
- `Market`: get infomation about a market
|
||||
- `Search`: quotes and news from search
|
||||
- `Sector` and `Industry`: sector and industry information
|
||||
- `EquityQuery` and `Screener`: build query to screen market
|
||||
|
||||
|
||||
@@ -29,7 +29,8 @@ exclude_patterns = []
|
||||
autoclass_content = 'both'
|
||||
autosummary_generate = True
|
||||
autodoc_default_options = {
|
||||
'exclude-members': '__init__'
|
||||
'exclude-members': '__init__',
|
||||
'members': True,
|
||||
}
|
||||
|
||||
# -- Options for HTML output -------------------------------------------------
|
||||
|
||||
6
doc/source/reference/examples/market.py
Normal file
6
doc/source/reference/examples/market.py
Normal file
@@ -0,0 +1,6 @@
|
||||
import yfinance as yf
|
||||
|
||||
EUROPE = yf.Market("EUROPE")
|
||||
|
||||
status = EUROPE.status
|
||||
summary = EUROPE.summary
|
||||
10
doc/source/reference/examples/search.py
Normal file
10
doc/source/reference/examples/search.py
Normal file
@@ -0,0 +1,10 @@
|
||||
import yfinance as yf
|
||||
|
||||
# get list of quotes
|
||||
quotes = yf.Search("AAPL", max_results=10).quotes
|
||||
|
||||
# get list of news
|
||||
news = yf.Search("Google", news_count=10).news
|
||||
|
||||
# get list of related research
|
||||
research = yf.Search("apple", include_research=True).research
|
||||
@@ -15,11 +15,15 @@ The following are the publicly available classes, and functions exposed by the `
|
||||
|
||||
- :attr:`Ticker <yfinance.Ticker>`: Class for accessing single ticker data.
|
||||
- :attr:`Tickers <yfinance.Tickers>`: Class for handling multiple tickers.
|
||||
- :attr:`Market <yfinance.Market>`: Class for accessing market summary.
|
||||
- :attr:`download <yfinance.download>`: Function to download market data for multiple tickers.
|
||||
- :attr:`Search <yfinance.Search>`: Class for accessing search results.
|
||||
- :attr:`Sector <yfinance.Sector>`: Domain class for accessing sector information.
|
||||
- :attr:`Industry <yfinance.Industry>`: Domain class for accessing industry information.
|
||||
- :attr:`download <yfinance.download>`: Function to download market data for multiple tickers.
|
||||
- :attr:`EquityQuery <yfinance.EquityQuery>`: Class to build equity market query.
|
||||
- :attr:`Screener <yfinance.Screener>`: Class to screen the market using defined query.
|
||||
- :attr:`Market <yfinance.Market>`: Class for accessing market status & summary.
|
||||
- :attr:`EquityQuery <yfinance.EquityQuery>`: Class to build equity query filters.
|
||||
- :attr:`FundQuery <yfinance.FundQuery>`: Class to build fund query filters.
|
||||
- :attr:`screen <yfinance.screen>`: Run equity/fund queries.
|
||||
- :attr:`enable_debug_mode <yfinance.enable_debug_mode>`: Function to enable debug mode for logging.
|
||||
- :attr:`set_tz_cache_location <yfinance.set_tz_cache_location>`: Function to set the timezone cache location.
|
||||
|
||||
@@ -30,9 +34,13 @@ The following are the publicly available classes, and functions exposed by the `
|
||||
|
||||
yfinance.ticker_tickers
|
||||
yfinance.stock
|
||||
yfinance.market
|
||||
yfinance.financials
|
||||
yfinance.analysis
|
||||
yfinance.market
|
||||
yfinance.search
|
||||
yfinance.sector_industry
|
||||
yfinance.screener
|
||||
yfinance.functions
|
||||
|
||||
yfinance.funds_data
|
||||
|
||||
@@ -13,25 +13,6 @@ The `download` function allows you to retrieve market data for multiple tickers
|
||||
|
||||
download
|
||||
|
||||
Query Market Data
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
The `Sector` and `Industry` modules allow you to access the sector and industry information.
|
||||
|
||||
.. autosummary::
|
||||
:toctree: api/
|
||||
|
||||
EquityQuery
|
||||
Screener
|
||||
|
||||
.. seealso::
|
||||
:attr:`EquityQuery.valid_operand_fields <yfinance.EquityQuery.valid_operand_fields>`
|
||||
supported operand values for query
|
||||
:attr:`EquityQuery.valid_eq_operand_map <yfinance.EquityQuery.valid_eq_operand_map>`
|
||||
supported `EQ query operand parameters`
|
||||
:attr:`Screener.predefined_bodies <yfinance.Screener.predefined_bodies>`
|
||||
supported predefined screens
|
||||
|
||||
|
||||
Enable Debug Mode
|
||||
~~~~~~~~~~~~~~~~~
|
||||
Enables logging of debug information for the `yfinance` package.
|
||||
|
||||
41
doc/source/reference/yfinance.market.rst
Normal file
41
doc/source/reference/yfinance.market.rst
Normal file
@@ -0,0 +1,41 @@
|
||||
=====================
|
||||
Market
|
||||
=====================
|
||||
|
||||
.. currentmodule:: yfinance
|
||||
|
||||
|
||||
Class
|
||||
------------
|
||||
The `Market` class, allows you to access market data in a Pythonic way.
|
||||
|
||||
.. autosummary::
|
||||
:toctree: api/
|
||||
|
||||
Market
|
||||
|
||||
Market Sample Code
|
||||
------------------
|
||||
|
||||
.. literalinclude:: examples/market.py
|
||||
:language: python
|
||||
|
||||
|
||||
Markets
|
||||
------------
|
||||
There are 8 different markets available in Yahoo Finance.
|
||||
|
||||
* US
|
||||
* GB
|
||||
|
||||
\
|
||||
|
||||
* ASIA
|
||||
* EUROPE
|
||||
|
||||
\
|
||||
|
||||
* RATES
|
||||
* COMMODITIES
|
||||
* CURRENCIES
|
||||
* CRYPTOCURRENCIES
|
||||
27
doc/source/reference/yfinance.screener.rst
Normal file
27
doc/source/reference/yfinance.screener.rst
Normal file
@@ -0,0 +1,27 @@
|
||||
=========================
|
||||
Screener & Query
|
||||
=========================
|
||||
|
||||
.. currentmodule:: yfinance
|
||||
|
||||
Query Market Data
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
The `Sector` and `Industry` modules allow you to access the sector and industry information.
|
||||
|
||||
.. autosummary::
|
||||
:toctree: api/
|
||||
|
||||
EquityQuery
|
||||
FundQuery
|
||||
screen
|
||||
|
||||
.. seealso::
|
||||
:attr:`EquityQuery.valid_fields <yfinance.EquityQuery.valid_fields>`
|
||||
supported operand values for query
|
||||
:attr:`EquityQuery.valid_values <yfinance.EquityQuery.valid_values>`
|
||||
supported `EQ query operand parameters`
|
||||
:attr:`FundQuery.valid_fields <yfinance.FundQuery.valid_fields>`
|
||||
supported operand values for query
|
||||
:attr:`FundQuery.valid_values <yfinance.FundQuery.valid_values>`
|
||||
supported `EQ query operand parameters`
|
||||
|
||||
22
doc/source/reference/yfinance.search.rst
Normal file
22
doc/source/reference/yfinance.search.rst
Normal file
@@ -0,0 +1,22 @@
|
||||
=====================
|
||||
Search & News
|
||||
=====================
|
||||
|
||||
.. currentmodule:: yfinance
|
||||
|
||||
|
||||
Class
|
||||
------------
|
||||
The `Search` module, allows you to access search data in a Pythonic way.
|
||||
|
||||
.. autosummary::
|
||||
:toctree: api/
|
||||
|
||||
Search
|
||||
|
||||
Search Sample Code
|
||||
------------------
|
||||
The `Search` module, allows you to access search data in a Pythonic way.
|
||||
|
||||
.. literalinclude:: examples/search.py
|
||||
:language: python
|
||||
@@ -1,5 +1,5 @@
|
||||
{% set name = "yfinance" %}
|
||||
{% set version = "0.2.50" %}
|
||||
{% set version = "0.2.53" %}
|
||||
|
||||
package:
|
||||
name: "{{ name|lower }}"
|
||||
|
||||
19
mkdocs.yml
19
mkdocs.yml
@@ -1,19 +0,0 @@
|
||||
# site_name: My Docs
|
||||
|
||||
# # mkdocs.yml
|
||||
# theme:
|
||||
# name: "material"
|
||||
|
||||
# plugins:
|
||||
# - search
|
||||
# - mkdocstrings
|
||||
|
||||
# nav:
|
||||
# - Introduction: 'index.md'
|
||||
# - Installation: 'installation.md'
|
||||
# - Quick Start: 'quickstart.md'
|
||||
# # - Ticker: 'Ticker.md'
|
||||
# - TickerBase: 'TickerBase.md'
|
||||
# # - Tickers: 'Tickers.md'
|
||||
# - utils: 'utils.md'
|
||||
# - multi: 'multi.md'
|
||||
@@ -2,10 +2,11 @@ pandas>=1.3.0
|
||||
numpy>=1.16.5
|
||||
requests>=2.31
|
||||
multitasking>=0.0.7
|
||||
lxml>=4.9.1
|
||||
platformdirs>=2.0.0
|
||||
pytz>=2022.5
|
||||
frozendict>=2.3.4
|
||||
beautifulsoup4>=4.11.1
|
||||
html5lib>=1.1
|
||||
peewee>=3.16.2
|
||||
peewee>=3.16.2
|
||||
requests_cache>=1.0
|
||||
requests_ratelimiter>=0.3.1
|
||||
scipy>=1.6.3
|
||||
4
setup.py
4
setup.py
@@ -61,9 +61,9 @@ setup(
|
||||
packages=find_packages(exclude=['contrib', 'docs', 'tests', 'examples']),
|
||||
install_requires=['pandas>=1.3.0', 'numpy>=1.16.5',
|
||||
'requests>=2.31', 'multitasking>=0.0.7',
|
||||
'lxml>=4.9.1', 'platformdirs>=2.0.0', 'pytz>=2022.5',
|
||||
'platformdirs>=2.0.0', 'pytz>=2022.5',
|
||||
'frozendict>=2.3.4', 'peewee>=3.16.2',
|
||||
'beautifulsoup4>=4.11.1', 'html5lib>=1.1'],
|
||||
'beautifulsoup4>=4.11.1'],
|
||||
extras_require={
|
||||
'nospam': ['requests_cache>=1.0', 'requests_ratelimiter>=0.3.1'],
|
||||
'repair': ['scipy>=1.6.3'],
|
||||
|
||||
@@ -5,9 +5,7 @@ import datetime as _dt
|
||||
import sys
|
||||
import os
|
||||
import yfinance
|
||||
from requests import Session
|
||||
from requests_cache import CacheMixin, SQLiteCache
|
||||
from requests_ratelimiter import LimiterMixin, MemoryQueueBucket
|
||||
from requests_ratelimiter import LimiterSession
|
||||
from pyrate_limiter import Duration, RequestRate, Limiter
|
||||
|
||||
_parent_dp = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
|
||||
@@ -27,19 +25,21 @@ if os.path.isdir(testing_cache_dirpath):
|
||||
import shutil
|
||||
shutil.rmtree(testing_cache_dirpath)
|
||||
|
||||
|
||||
# Setup a session to rate-limit and cache persistently:
|
||||
class CachedLimiterSession(CacheMixin, LimiterMixin, Session):
|
||||
pass
|
||||
history_rate = RequestRate(1, Duration.SECOND*2)
|
||||
# Setup a session to only rate-limit
|
||||
history_rate = RequestRate(1, Duration.SECOND)
|
||||
limiter = Limiter(history_rate)
|
||||
cache_fp = os.path.join(testing_cache_dirpath, "unittests-cache")
|
||||
session_gbl = CachedLimiterSession(
|
||||
limiter=limiter,
|
||||
bucket_class=MemoryQueueBucket,
|
||||
backend=SQLiteCache(cache_fp, expire_after=_dt.timedelta(hours=1)),
|
||||
)
|
||||
# Use this instead if only want rate-limiting:
|
||||
# from requests_ratelimiter import LimiterSession
|
||||
# session_gbl = LimiterSession(limiter=limiter)
|
||||
session_gbl = LimiterSession(limiter=limiter)
|
||||
|
||||
# Use this instead if you also want caching:
|
||||
# from requests_cache import CacheMixin, SQLiteCache
|
||||
# from requests_ratelimiter import LimiterMixin
|
||||
# from requests import Session
|
||||
# from pyrate_limiter import MemoryQueueBucket
|
||||
# class CachedLimiterSession(CacheMixin, LimiterMixin, Session):
|
||||
# pass
|
||||
# cache_fp = os.path.join(testing_cache_dirpath, "unittests-cache")
|
||||
# session_gbl = CachedLimiterSession(
|
||||
# limiter=limiter,
|
||||
# bucket_class=MemoryQueueBucket,
|
||||
# backend=SQLiteCache(cache_fp, expire_after=_dt.timedelta(hours=1)),
|
||||
# )
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
93
tests/test_cache.py
Normal file
93
tests/test_cache.py
Normal file
@@ -0,0 +1,93 @@
|
||||
"""
|
||||
Tests for cache
|
||||
|
||||
To run all tests in suite from commandline:
|
||||
python -m unittest tests.cache
|
||||
|
||||
Specific test class:
|
||||
python -m unittest tests.cache.TestCache
|
||||
|
||||
"""
|
||||
from unittest import TestSuite
|
||||
|
||||
from tests.context import yfinance as yf
|
||||
|
||||
import unittest
|
||||
import tempfile
|
||||
import os
|
||||
|
||||
|
||||
class TestCache(unittest.TestCase):
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
cls.tempCacheDir = tempfile.TemporaryDirectory()
|
||||
yf.set_tz_cache_location(cls.tempCacheDir.name)
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
yf.cache._TzDBManager.close_db()
|
||||
cls.tempCacheDir.cleanup()
|
||||
|
||||
def test_storeTzNoRaise(self):
|
||||
# storing TZ to cache should never raise exception
|
||||
tkr = 'AMZN'
|
||||
tz1 = "America/New_York"
|
||||
tz2 = "London/Europe"
|
||||
cache = yf.cache.get_tz_cache()
|
||||
cache.store(tkr, tz1)
|
||||
cache.store(tkr, tz2)
|
||||
|
||||
def test_setTzCacheLocation(self):
|
||||
self.assertEqual(yf.cache._TzDBManager.get_location(), self.tempCacheDir.name)
|
||||
|
||||
tkr = 'AMZN'
|
||||
tz1 = "America/New_York"
|
||||
cache = yf.cache.get_tz_cache()
|
||||
cache.store(tkr, tz1)
|
||||
|
||||
self.assertTrue(os.path.exists(os.path.join(self.tempCacheDir.name, "tkr-tz.db")))
|
||||
|
||||
|
||||
class TestCacheNoPermission(unittest.TestCase):
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
if os.name == "nt": # Windows
|
||||
cls.cache_path = "C:\\Windows\\System32\\yf-cache"
|
||||
else: # Unix/Linux/MacOS
|
||||
# Use a writable directory
|
||||
cls.cache_path = "/yf-cache"
|
||||
yf.set_tz_cache_location(cls.cache_path)
|
||||
|
||||
def test_tzCacheRootStore(self):
|
||||
# Test that if cache path in read-only filesystem, no exception.
|
||||
tkr = 'AMZN'
|
||||
tz1 = "America/New_York"
|
||||
|
||||
# During attempt to store, will discover cannot write
|
||||
yf.cache.get_tz_cache().store(tkr, tz1)
|
||||
|
||||
# Handling the store failure replaces cache with a dummy
|
||||
cache = yf.cache.get_tz_cache()
|
||||
self.assertTrue(cache.dummy)
|
||||
cache.store(tkr, tz1)
|
||||
|
||||
def test_tzCacheRootLookup(self):
|
||||
# Test that if cache path in read-only filesystem, no exception.
|
||||
tkr = 'AMZN'
|
||||
# During attempt to lookup, will discover cannot write
|
||||
yf.cache.get_tz_cache().lookup(tkr)
|
||||
|
||||
# Handling the lookup failure replaces cache with a dummy
|
||||
cache = yf.cache.get_tz_cache()
|
||||
self.assertTrue(cache.dummy)
|
||||
cache.lookup(tkr)
|
||||
|
||||
def suite():
|
||||
ts: TestSuite = unittest.TestSuite()
|
||||
ts.addTest(TestCache('Test cache'))
|
||||
ts.addTest(TestCacheNoPermission('Test cache no permission'))
|
||||
return ts
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
@@ -367,9 +367,9 @@ class TestPriceRepair(unittest.TestCase):
|
||||
"Close": [103.03, 102.05, 102.08],
|
||||
"Adj Close": [102.03, 102.05, 102.08],
|
||||
"Volume": [560, 137, 117]},
|
||||
index=_pd.to_datetime([_dt.datetime(2022, 11, 1),
|
||||
_dt.datetime(2022, 10, 31),
|
||||
_dt.datetime(2022, 10, 30)]))
|
||||
index=_pd.to_datetime([_dt.datetime(2024, 11, 1),
|
||||
_dt.datetime(2024, 10, 31),
|
||||
_dt.datetime(2024, 10, 30)]))
|
||||
df_bad = df_bad.sort_index()
|
||||
df_bad.index.name = "Date"
|
||||
df_bad.index = df_bad.index.tz_localize(tz_exchange)
|
||||
@@ -377,9 +377,9 @@ class TestPriceRepair(unittest.TestCase):
|
||||
repaired_df = hist._fix_zeroes(df_bad, "1d", tz_exchange, prepost=False)
|
||||
|
||||
correct_df = df_bad.copy()
|
||||
correct_df.loc["2022-11-01", "Open"] = 102.080002
|
||||
correct_df.loc["2022-11-01", "Low"] = 102.032501
|
||||
correct_df.loc["2022-11-01", "High"] = 102.080002
|
||||
correct_df.loc["2024-11-01", "Open"] = 102.572729
|
||||
correct_df.loc["2024-11-01", "Low"] = 102.309091
|
||||
correct_df.loc["2024-11-01", "High"] = 102.572729
|
||||
for c in ["Open", "Low", "High", "Close"]:
|
||||
self.assertTrue(_np.isclose(repaired_df[c], correct_df[c], rtol=1e-8).all())
|
||||
|
||||
@@ -462,7 +462,7 @@ class TestPriceRepair(unittest.TestCase):
|
||||
# Stocks that split in 2022 but no problems in Yahoo data,
|
||||
# so repair should change nothing
|
||||
good_tkrs = ['AMZN', 'DXCM', 'FTNT', 'GOOG', 'GME', 'PANW', 'SHOP', 'TSLA']
|
||||
good_tkrs += ['AEI', 'GHI', 'IRON', 'LXU', 'NUZE', 'RSLS', 'TISI']
|
||||
good_tkrs += ['AEI', 'GHI', 'IRON', 'LXU', 'RSLS', 'TISI']
|
||||
good_tkrs += ['BOL.ST', 'TUI1.DE']
|
||||
intervals = ['1d', '1wk', '1mo', '3mo']
|
||||
for tkr in good_tkrs:
|
||||
@@ -580,7 +580,6 @@ class TestPriceRepair(unittest.TestCase):
|
||||
# Div 100x
|
||||
bad_tkrs += ['ABDP.L']
|
||||
bad_tkrs += ['ELCO.L']
|
||||
bad_tkrs += ['KWS.L']
|
||||
bad_tkrs += ['PSH.L']
|
||||
|
||||
# Div 100x and adjust too big
|
||||
|
||||
@@ -118,7 +118,7 @@ class TestPriceHistory(unittest.TestCase):
|
||||
continue
|
||||
test_run = True
|
||||
|
||||
df = dat.history(start=dt.date() - _dt.timedelta(days=7), interval="1wk")
|
||||
df = dat.history(start=dt.date() - _dt.timedelta(days=13), interval="1wk")
|
||||
dt0 = df.index[-2]
|
||||
dt1 = df.index[-1]
|
||||
try:
|
||||
@@ -401,7 +401,7 @@ class TestPriceHistory(unittest.TestCase):
|
||||
|
||||
# Setup
|
||||
tkr = "AMZN"
|
||||
special_day = _dt.date(2023, 11, 24)
|
||||
special_day = _dt.date(2024, 11, 29)
|
||||
time_early_close = _dt.time(13)
|
||||
dat = yf.Ticker(tkr, session=self.session)
|
||||
|
||||
@@ -427,8 +427,8 @@ class TestPriceHistory(unittest.TestCase):
|
||||
dat = yf.Ticker(tkr, session=self.session)
|
||||
|
||||
# Test no other afternoons (or mornings) were pruned
|
||||
start_d = _dt.date(2023, 1, 1)
|
||||
end_d = _dt.date(2023+1, 1, 1)
|
||||
start_d = _dt.date(2024, 1, 1)
|
||||
end_d = _dt.date(2024+1, 1, 1)
|
||||
df = dat.history(start=start_d, end=end_d, interval="1h", prepost=False, keepna=True)
|
||||
last_dts = _pd.Series(df.index).groupby(df.index.date).last()
|
||||
dfd = dat.history(start=start_d, end=end_d, interval='1d', prepost=False, keepna=True)
|
||||
|
||||
@@ -1,133 +1,38 @@
|
||||
import unittest
|
||||
from unittest.mock import patch, MagicMock
|
||||
from yfinance.const import PREDEFINED_SCREENER_BODY_MAP
|
||||
from yfinance.screener.screener import Screener
|
||||
from yfinance.screener.screener_query import EquityQuery
|
||||
from yfinance.screener.screener import screen
|
||||
from yfinance.screener.query import EquityQuery
|
||||
|
||||
|
||||
class TestScreener(unittest.TestCase):
|
||||
|
||||
@classmethod
|
||||
def setUpClass(self):
|
||||
self.screener = Screener()
|
||||
self.query = EquityQuery('gt',['eodprice',3])
|
||||
|
||||
def test_set_default_body(self):
|
||||
self.screener.set_default_body(self.query)
|
||||
|
||||
self.assertEqual(self.screener.body['offset'], 0)
|
||||
self.assertEqual(self.screener.body['size'], 100)
|
||||
self.assertEqual(self.screener.body['sortField'], 'ticker')
|
||||
self.assertEqual(self.screener.body['sortType'], 'desc')
|
||||
self.assertEqual(self.screener.body['quoteType'], 'equity')
|
||||
self.assertEqual(self.screener.body['query'], self.query.to_dict())
|
||||
self.assertEqual(self.screener.body['userId'], '')
|
||||
self.assertEqual(self.screener.body['userIdType'], 'guid')
|
||||
|
||||
def test_set_predefined_body(self):
|
||||
k = 'most_actives'
|
||||
self.screener.set_predefined_body(k)
|
||||
self.assertEqual(self.screener.body, PREDEFINED_SCREENER_BODY_MAP[k])
|
||||
|
||||
def test_set_predefined_body_invalid_key(self):
|
||||
with self.assertRaises(ValueError):
|
||||
self.screener.set_predefined_body('invalid_key')
|
||||
|
||||
def test_set_body(self):
|
||||
body = {
|
||||
"offset": 0,
|
||||
"size": 100,
|
||||
"sortField": "ticker",
|
||||
"sortType": "desc",
|
||||
"quoteType": "equity",
|
||||
"query": self.query.to_dict(),
|
||||
"userId": "",
|
||||
"userIdType": "guid"
|
||||
}
|
||||
self.screener.set_body(body)
|
||||
|
||||
self.assertEqual(self.screener.body, body)
|
||||
|
||||
def test_set_body_missing_keys(self):
|
||||
body = {
|
||||
"offset": 0,
|
||||
"size": 100,
|
||||
"sortField": "ticker",
|
||||
"sortType": "desc",
|
||||
"quoteType": "equity"
|
||||
}
|
||||
with self.assertRaises(ValueError):
|
||||
self.screener.set_body(body)
|
||||
|
||||
def test_set_body_extra_keys(self):
|
||||
body = {
|
||||
"offset": 0,
|
||||
"size": 100,
|
||||
"sortField": "ticker",
|
||||
"sortType": "desc",
|
||||
"quoteType": "equity",
|
||||
"query": self.query.to_dict(),
|
||||
"userId": "",
|
||||
"userIdType": "guid",
|
||||
"extraKey": "extraValue"
|
||||
}
|
||||
with self.assertRaises(ValueError):
|
||||
self.screener.set_body(body)
|
||||
|
||||
def test_patch_body(self):
|
||||
initial_body = {
|
||||
"offset": 0,
|
||||
"size": 100,
|
||||
"sortField": "ticker",
|
||||
"sortType": "desc",
|
||||
"quoteType": "equity",
|
||||
"query": self.query.to_dict(),
|
||||
"userId": "",
|
||||
"userIdType": "guid"
|
||||
}
|
||||
self.screener.set_body(initial_body)
|
||||
patch_values = {"size": 50}
|
||||
self.screener.patch_body(patch_values)
|
||||
|
||||
self.assertEqual(self.screener.body['size'], 50)
|
||||
self.assertEqual(self.screener.body['query'], self.query.to_dict())
|
||||
|
||||
def test_patch_body_extra_keys(self):
|
||||
initial_body = {
|
||||
"offset": 0,
|
||||
"size": 100,
|
||||
"sortField": "ticker",
|
||||
"sortType": "desc",
|
||||
"quoteType": "equity",
|
||||
"query": self.query.to_dict(),
|
||||
"userId": "",
|
||||
"userIdType": "guid"
|
||||
}
|
||||
self.screener.set_body(initial_body)
|
||||
patch_values = {"extraKey": "extraValue"}
|
||||
with self.assertRaises(ValueError):
|
||||
self.screener.patch_body(patch_values)
|
||||
self.predefined = 'aggressive_small_caps'
|
||||
|
||||
@patch('yfinance.screener.screener.YfData.post')
|
||||
def test_fetch(self, mock_post):
|
||||
mock_response = MagicMock()
|
||||
mock_response.json.return_value = {'finance': {'result': [{}]}}
|
||||
mock_post.return_value = mock_response
|
||||
def test_set_large_size_in_body(self, mock_post):
|
||||
with self.assertRaises(ValueError):
|
||||
screen(self.query, size=251)
|
||||
|
||||
self.screener.set_default_body(self.query)
|
||||
response = self.screener._fetch()
|
||||
|
||||
self.assertEqual(response, {'finance': {'result': [{}]}})
|
||||
|
||||
@patch('yfinance.screener.screener.YfData.post')
|
||||
def test_fetch_and_parse(self, mock_post):
|
||||
@patch('yfinance.data.YfData.post')
|
||||
def test_fetch_query(self, mock_post):
|
||||
mock_response = MagicMock()
|
||||
mock_response.json.return_value = {'finance': {'result': [{'key': 'value'}]}}
|
||||
mock_post.return_value = mock_response
|
||||
|
||||
self.screener.set_default_body(self.query)
|
||||
self.screener._fetch_and_parse()
|
||||
self.assertEqual(self.screener.response, {'key': 'value'})
|
||||
response = screen(self.query)
|
||||
self.assertEqual(response, {'key': 'value'})
|
||||
|
||||
@patch('yfinance.data.YfData.get')
|
||||
def test_fetch_predefined(self, mock_get):
|
||||
mock_response = MagicMock()
|
||||
mock_response.json.return_value = {'finance': {'result': [{'key': 'value'}]}}
|
||||
mock_get.return_value = mock_response
|
||||
|
||||
response = screen(self.predefined)
|
||||
self.assertEqual(response, {'key': 'value'})
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
42
tests/test_search.py
Normal file
42
tests/test_search.py
Normal file
@@ -0,0 +1,42 @@
|
||||
import unittest
|
||||
|
||||
from tests.context import yfinance as yf
|
||||
|
||||
|
||||
class TestSearch(unittest.TestCase):
|
||||
def test_invalid_query(self):
|
||||
search = yf.Search(query="XYZXYZ")
|
||||
|
||||
self.assertEqual(len(search.quotes), 0)
|
||||
self.assertEqual(len(search.news), 0)
|
||||
self.assertEqual(len(search.lists), 0)
|
||||
self.assertEqual(len(search.nav), 0)
|
||||
self.assertEqual(len(search.research), 0)
|
||||
|
||||
def test_empty_query(self):
|
||||
search = yf.Search(query="")
|
||||
|
||||
self.assertEqual(len(search.quotes), 0)
|
||||
self.assertEqual(len(search.news), 0)
|
||||
|
||||
def test_fuzzy_query(self):
|
||||
search = yf.Search(query="Appel", enable_fuzzy_query=True)
|
||||
|
||||
# Check if the fuzzy search retrieves relevant results despite the typo
|
||||
self.assertGreater(len(search.quotes), 0)
|
||||
self.assertIn("AAPL", search.quotes[0]['symbol'])
|
||||
|
||||
def test_quotes(self):
|
||||
search = yf.Search(query="AAPL", max_results=5)
|
||||
|
||||
self.assertEqual(len(search.quotes), 5)
|
||||
self.assertIn("AAPL", search.quotes[0]['symbol'])
|
||||
|
||||
def test_news(self):
|
||||
search = yf.Search(query="AAPL", news_count=3)
|
||||
|
||||
self.assertEqual(len(search.news), 3)
|
||||
|
||||
def test_research_reports(self):
|
||||
search = yf.Search(query="AAPL", include_research=True)
|
||||
self.assertEqual(len(search.research), 3)
|
||||
@@ -8,6 +8,8 @@ Specific test class:
|
||||
python -m unittest tests.ticker.TestTicker
|
||||
|
||||
"""
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from tests.context import yfinance as yf
|
||||
@@ -133,8 +135,55 @@ class TestTicker(unittest.TestCase):
|
||||
with self.assertRaises(YFInvalidPeriodError):
|
||||
dat.history(period="2wks", interval="1d", raise_errors=True)
|
||||
with self.assertRaises(YFInvalidPeriodError):
|
||||
dat.history(period="2mo", interval="1d", raise_errors=True)
|
||||
dat.history(period="2mos", interval="1d", raise_errors=True)
|
||||
|
||||
def test_valid_custom_periods(self):
|
||||
valid_periods = [
|
||||
# Yahoo provided periods
|
||||
("1d", "1m"), ("5d", "15m"), ("1mo", "1d"), ("3mo", "1wk"),
|
||||
("6mo", "1d"), ("1y", "1mo"), ("5y", "1wk"), ("max", "1mo"),
|
||||
|
||||
# Custom periods
|
||||
("2d", "30m"), ("10mo", "1d"), ("1y", "1d"), ("3y", "1d"),
|
||||
("2wk", "15m"), ("6mo", "5d"), ("10y", "1wk")
|
||||
]
|
||||
|
||||
tkr = "AAPL"
|
||||
dat = yf.Ticker(tkr, session=self.session)
|
||||
|
||||
for period, interval in valid_periods:
|
||||
with self.subTest(period=period, interval=interval):
|
||||
df = dat.history(period=period, interval=interval, raise_errors=True)
|
||||
self.assertIsInstance(df, pd.DataFrame)
|
||||
self.assertFalse(df.empty, f"No data returned for period={period}, interval={interval}")
|
||||
self.assertIn("Close", df.columns, f"'Close' column missing for period={period}, interval={interval}")
|
||||
|
||||
# Validate date range
|
||||
now = datetime.now()
|
||||
if period != "max": # Difficult to assert for "max", therefore we skip
|
||||
if period.endswith("d"):
|
||||
days = int(period[:-1])
|
||||
expected_start = now - timedelta(days=days)
|
||||
elif period.endswith("mo"):
|
||||
months = int(period[:-2])
|
||||
expected_start = now - timedelta(days=30 * months)
|
||||
elif period.endswith("y"):
|
||||
years = int(period[:-1])
|
||||
expected_start = now - timedelta(days=365 * years)
|
||||
elif period.endswith("wk"):
|
||||
weeks = int(period[:-2])
|
||||
expected_start = now - timedelta(weeks=weeks)
|
||||
else:
|
||||
continue
|
||||
|
||||
actual_start = df.index[0].to_pydatetime().replace(tzinfo=None)
|
||||
expected_start = expected_start.replace(hour=0, minute=0, second=0, microsecond=0)
|
||||
|
||||
# leeway added because of weekends
|
||||
self.assertGreaterEqual(actual_start, expected_start - timedelta(days=10),
|
||||
f"Start date {actual_start} out of range for period={period}")
|
||||
self.assertLessEqual(df.index[-1].to_pydatetime().replace(tzinfo=None), now,
|
||||
f"End date {df.index[-1]} out of range for period={period}")
|
||||
|
||||
def test_prices_missing(self):
|
||||
# this test will need to be updated every time someone wants to run a test
|
||||
@@ -259,14 +308,13 @@ class TestTickerHistory(unittest.TestCase):
|
||||
actual_urls_called[i] = u
|
||||
actual_urls_called = tuple(actual_urls_called)
|
||||
|
||||
expected_urls = (
|
||||
f"https://query2.finance.yahoo.com/v8/finance/chart/{symbol}?events=div%2Csplits%2CcapitalGains&includePrePost=False&interval=1d&range={period}",
|
||||
)
|
||||
self.assertEqual(
|
||||
expected_urls,
|
||||
actual_urls_called,
|
||||
"Different than expected url used to fetch history."
|
||||
)
|
||||
expected_urls = [
|
||||
f"https://query2.finance.yahoo.com/v8/finance/chart/{symbol}?interval=1d&range=1d", # ticker's tz
|
||||
f"https://query2.finance.yahoo.com/v8/finance/chart/{symbol}?events=div%2Csplits%2CcapitalGains&includePrePost=False&interval=1d&range={period}"
|
||||
]
|
||||
for url in actual_urls_called:
|
||||
self.assertTrue(url in expected_urls, f"Unexpected URL called: {url}")
|
||||
|
||||
def test_dividends(self):
|
||||
data = self.ticker.dividends
|
||||
self.assertIsInstance(data, pd.Series, "data has wrong type")
|
||||
@@ -309,7 +357,7 @@ class TestTickerEarnings(unittest.TestCase):
|
||||
def test_earnings_dates_with_limit(self):
|
||||
# use ticker with lots of historic earnings
|
||||
ticker = yf.Ticker("IBM")
|
||||
limit = 110
|
||||
limit = 100
|
||||
data = ticker.get_earnings_dates(limit=limit)
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
@@ -760,7 +808,7 @@ class TestTickerAnalysts(unittest.TestCase):
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
self.assertTrue(len(data.columns) == 4, "data has wrong number of columns")
|
||||
self.assertEqual(data.columns.values.tolist(), ['Firm', 'ToGrade', 'FromGrade', 'Action'], "data has wrong column names")
|
||||
self.assertCountEqual(data.columns.values.tolist(), ['Firm', 'ToGrade', 'FromGrade', 'Action'], "data has wrong column names")
|
||||
self.assertIsInstance(data.index, pd.DatetimeIndex, "data has wrong index type")
|
||||
|
||||
data_cached = self.ticker.upgrades_downgrades
|
||||
@@ -770,9 +818,6 @@ class TestTickerAnalysts(unittest.TestCase):
|
||||
data = self.ticker.analyst_price_targets
|
||||
self.assertIsInstance(data, dict, "data has wrong type")
|
||||
|
||||
keys = {'current', 'low', 'high', 'mean', 'median'}
|
||||
self.assertEqual(data.keys(), keys, "data has wrong keys")
|
||||
|
||||
data_cached = self.ticker.analyst_price_targets
|
||||
self.assertIs(data, data_cached, "data not cached")
|
||||
|
||||
@@ -781,12 +826,6 @@ class TestTickerAnalysts(unittest.TestCase):
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
|
||||
columns = ['numberOfAnalysts', 'avg', 'low', 'high', 'yearAgoEps', 'growth']
|
||||
self.assertEqual(data.columns.values.tolist(), columns, "data has wrong column names")
|
||||
|
||||
index = ['0q', '+1q', '0y', '+1y']
|
||||
self.assertEqual(data.index.values.tolist(), index, "data has wrong row names")
|
||||
|
||||
data_cached = self.ticker.earnings_estimate
|
||||
self.assertIs(data, data_cached, "data not cached")
|
||||
|
||||
@@ -795,12 +834,6 @@ class TestTickerAnalysts(unittest.TestCase):
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
|
||||
columns = ['numberOfAnalysts', 'avg', 'low', 'high', 'yearAgoRevenue', 'growth']
|
||||
self.assertEqual(data.columns.values.tolist(), columns, "data has wrong column names")
|
||||
|
||||
index = ['0q', '+1q', '0y', '+1y']
|
||||
self.assertEqual(data.index.values.tolist(), index, "data has wrong row names")
|
||||
|
||||
data_cached = self.ticker.revenue_estimate
|
||||
self.assertIs(data, data_cached, "data not cached")
|
||||
|
||||
@@ -809,8 +842,6 @@ class TestTickerAnalysts(unittest.TestCase):
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
|
||||
columns = ['epsEstimate', 'epsActual', 'epsDifference', 'surprisePercent']
|
||||
self.assertEqual(data.columns.values.tolist(), columns, "data has wrong column names")
|
||||
self.assertIsInstance(data.index, pd.DatetimeIndex, "data has wrong index type")
|
||||
|
||||
data_cached = self.ticker.earnings_history
|
||||
@@ -821,12 +852,6 @@ class TestTickerAnalysts(unittest.TestCase):
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
|
||||
columns = ['current', '7daysAgo', '30daysAgo', '60daysAgo', '90daysAgo']
|
||||
self.assertEqual(data.columns.values.tolist(), columns, "data has wrong column names")
|
||||
|
||||
index = ['0q', '+1q', '0y', '+1y']
|
||||
self.assertEqual(data.index.values.tolist(), index, "data has wrong row names")
|
||||
|
||||
data_cached = self.ticker.eps_trend
|
||||
self.assertIs(data, data_cached, "data not cached")
|
||||
|
||||
@@ -835,12 +860,6 @@ class TestTickerAnalysts(unittest.TestCase):
|
||||
self.assertIsInstance(data, pd.DataFrame, "data has wrong type")
|
||||
self.assertFalse(data.empty, "data is empty")
|
||||
|
||||
columns = ['stock', 'industry', 'sector', 'index']
|
||||
self.assertEqual(data.columns.values.tolist(), columns, "data has wrong column names")
|
||||
|
||||
index = ['0q', '+1q', '0y', '+1y']
|
||||
self.assertEqual(data.index.values.tolist(), index, "data has wrong row names")
|
||||
|
||||
data_cached = self.ticker.growth_estimates
|
||||
self.assertIs(data, data_cached, "data not cached")
|
||||
|
||||
|
||||
@@ -12,74 +12,10 @@ from datetime import datetime
|
||||
from unittest import TestSuite
|
||||
|
||||
import pandas as pd
|
||||
# import numpy as np
|
||||
|
||||
from tests.context import yfinance as yf
|
||||
|
||||
import unittest
|
||||
# import requests_cache
|
||||
import tempfile
|
||||
import os
|
||||
|
||||
|
||||
class TestCache(unittest.TestCase):
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
cls.tempCacheDir = tempfile.TemporaryDirectory()
|
||||
yf.set_tz_cache_location(cls.tempCacheDir.name)
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
cls.tempCacheDir.cleanup()
|
||||
|
||||
def test_storeTzNoRaise(self):
|
||||
# storing TZ to cache should never raise exception
|
||||
tkr = 'AMZN'
|
||||
tz1 = "America/New_York"
|
||||
tz2 = "London/Europe"
|
||||
cache = yf.cache.get_tz_cache()
|
||||
cache.store(tkr, tz1)
|
||||
cache.store(tkr, tz2)
|
||||
|
||||
def test_setTzCacheLocation(self):
|
||||
self.assertEqual(yf.cache._TzDBManager.get_location(), self.tempCacheDir.name)
|
||||
|
||||
tkr = 'AMZN'
|
||||
tz1 = "America/New_York"
|
||||
cache = yf.cache.get_tz_cache()
|
||||
cache.store(tkr, tz1)
|
||||
|
||||
self.assertTrue(os.path.exists(os.path.join(self.tempCacheDir.name, "tkr-tz.db")))
|
||||
|
||||
|
||||
class TestCacheNoPermission(unittest.TestCase):
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
yf.set_tz_cache_location("/root/yf-cache")
|
||||
|
||||
def test_tzCacheRootStore(self):
|
||||
# Test that if cache path in read-only filesystem, no exception.
|
||||
tkr = 'AMZN'
|
||||
tz1 = "America/New_York"
|
||||
|
||||
# During attempt to store, will discover cannot write
|
||||
yf.cache.get_tz_cache().store(tkr, tz1)
|
||||
|
||||
# Handling the store failure replaces cache with a dummy
|
||||
cache = yf.cache.get_tz_cache()
|
||||
self.assertTrue(cache.dummy)
|
||||
cache.store(tkr, tz1)
|
||||
|
||||
def test_tzCacheRootLookup(self):
|
||||
# Test that if cache path in read-only filesystem, no exception.
|
||||
tkr = 'AMZN'
|
||||
# During attempt to lookup, will discover cannot write
|
||||
yf.cache.get_tz_cache().lookup(tkr)
|
||||
|
||||
# Handling the lookup failure replaces cache with a dummy
|
||||
cache = yf.cache.get_tz_cache()
|
||||
self.assertTrue(cache.dummy)
|
||||
cache.lookup(tkr)
|
||||
from yfinance.utils import is_valid_period_format
|
||||
|
||||
|
||||
class TestPandas(unittest.TestCase):
|
||||
@@ -105,11 +41,30 @@ class TestPandas(unittest.TestCase):
|
||||
i += 1
|
||||
|
||||
|
||||
class TestUtils(unittest.TestCase):
|
||||
def test_is_valid_period_format_valid(self):
|
||||
self.assertTrue(is_valid_period_format("1d"))
|
||||
self.assertTrue(is_valid_period_format("5wk"))
|
||||
self.assertTrue(is_valid_period_format("12mo"))
|
||||
self.assertTrue(is_valid_period_format("2y"))
|
||||
|
||||
def test_is_valid_period_format_invalid(self):
|
||||
self.assertFalse(is_valid_period_format("1m")) # Incorrect suffix
|
||||
self.assertFalse(is_valid_period_format("2wks")) # Incorrect suffix
|
||||
self.assertFalse(is_valid_period_format("10")) # Missing suffix
|
||||
self.assertFalse(is_valid_period_format("abc")) # Invalid string
|
||||
self.assertFalse(is_valid_period_format("")) # Empty string
|
||||
|
||||
def test_is_valid_period_format_edge_cases(self):
|
||||
self.assertFalse(is_valid_period_format(None)) # None input
|
||||
self.assertFalse(is_valid_period_format("0d")) # Zero is invalid
|
||||
self.assertTrue(is_valid_period_format("999mo")) # Large number valid
|
||||
|
||||
|
||||
def suite():
|
||||
ts: TestSuite = unittest.TestSuite()
|
||||
ts.addTest(TestCache('Test cache'))
|
||||
ts.addTest(TestCacheNoPermission('Test cache no permission'))
|
||||
ts.addTest(TestPandas("Test pandas"))
|
||||
ts.addTest(TestUtils("Test utils"))
|
||||
return ts
|
||||
|
||||
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#
|
||||
|
||||
from . import version
|
||||
from .search import Search
|
||||
from .ticker import Ticker
|
||||
from .tickers import Tickers
|
||||
from .multi import download
|
||||
@@ -27,8 +28,10 @@ from .utils import enable_debug_mode
|
||||
from .cache import set_tz_cache_location
|
||||
from .domain.sector import Sector
|
||||
from .domain.industry import Industry
|
||||
from .screener.screener import Screener
|
||||
from .screener.screener_query import EquityQuery
|
||||
from .domain.market import Market
|
||||
|
||||
from .screener.query import EquityQuery, FundQuery
|
||||
from .screener.screener import screen, PREDEFINED_SCREENER_QUERIES
|
||||
|
||||
__version__ = version.version
|
||||
__author__ = "Ran Aroussi"
|
||||
@@ -36,5 +39,6 @@ __author__ = "Ran Aroussi"
|
||||
import warnings
|
||||
warnings.filterwarnings('default', category=DeprecationWarning, module='^yfinance')
|
||||
|
||||
__all__ = ['download', 'Ticker', 'Tickers', 'enable_debug_mode', 'set_tz_cache_location', 'Sector', 'Industry',
|
||||
'EquityQuery','Screener']
|
||||
__all__ = ['download', 'Market', 'Search', 'Ticker', 'Tickers', 'enable_debug_mode', 'set_tz_cache_location', 'Sector', 'Industry']
|
||||
# screener stuff:
|
||||
__all__ += ['EquityQuery', 'FundQuery', 'screen', 'PREDEFINED_SCREENER_QUERIES']
|
||||
153
yfinance/base.py
153
yfinance/base.py
@@ -21,18 +21,18 @@
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
from io import StringIO
|
||||
import json as _json
|
||||
import warnings
|
||||
from typing import Optional, Union
|
||||
from urllib.parse import quote as urlencode
|
||||
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
import requests
|
||||
|
||||
from . import utils, cache
|
||||
from .data import YfData
|
||||
from .exceptions import YFEarningsDateMissing
|
||||
from .exceptions import YFEarningsDateMissing, YFRateLimitError
|
||||
from .scrapers.analysis import Analysis
|
||||
from .scrapers.fundamentals import Fundamentals
|
||||
from .scrapers.holders import Holders
|
||||
@@ -40,7 +40,7 @@ from .scrapers.quote import Quote, FastInfo
|
||||
from .scrapers.history import PriceHistory
|
||||
from .scrapers.funds import FundsData
|
||||
|
||||
from .const import _BASE_URL_, _ROOT_URL_
|
||||
from .const import _BASE_URL_, _ROOT_URL_, _QUERY1_URL_
|
||||
|
||||
|
||||
class TickerBase:
|
||||
@@ -124,6 +124,9 @@ class TickerBase:
|
||||
try:
|
||||
data = self._data.cache_get(url=url, params=params, proxy=proxy, timeout=timeout)
|
||||
data = data.json()
|
||||
except YFRateLimitError:
|
||||
# Must propagate this
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to get ticker '{self.ticker}' reason: {e}")
|
||||
return None
|
||||
@@ -534,26 +537,45 @@ class TickerBase:
|
||||
self._isin = data.split(search_str)[1].split('"')[0].split('|')[0]
|
||||
return self._isin
|
||||
|
||||
def get_news(self, proxy=None) -> list:
|
||||
def get_news(self, count=10, tab="news", proxy=None) -> list:
|
||||
"""Allowed options for tab: "news", "all", "press releases"""
|
||||
if self._news:
|
||||
return self._news
|
||||
|
||||
# Getting data from json
|
||||
url = f"{_BASE_URL_}/v1/finance/search?q={self.ticker}"
|
||||
data = self._data.cache_get(url=url, proxy=proxy)
|
||||
logger = utils.get_yf_logger()
|
||||
|
||||
tab_queryrefs = {
|
||||
"all": "newsAll",
|
||||
"news": "latestNews",
|
||||
"press releases": "pressRelease",
|
||||
}
|
||||
|
||||
query_ref = tab_queryrefs.get(tab.lower())
|
||||
if not query_ref:
|
||||
raise ValueError(f"Invalid tab name '{tab}'. Choose from: {', '.join(tab_queryrefs.keys())}")
|
||||
|
||||
url = f"{_ROOT_URL_}/xhr/ncp?queryRef={query_ref}&serviceKey=ncp_fin"
|
||||
payload = {
|
||||
"serviceConfig": {
|
||||
"snippetCount": count,
|
||||
"s": [self.ticker]
|
||||
}
|
||||
}
|
||||
|
||||
data = self._data.post(url, body=payload, proxy=proxy)
|
||||
if data is None or "Will be right back" in data.text:
|
||||
raise RuntimeError("*** YAHOO! FINANCE IS CURRENTLY DOWN! ***\n"
|
||||
"Our engineers are working quickly to resolve "
|
||||
"the issue. Thank you for your patience.")
|
||||
try:
|
||||
data = data.json()
|
||||
except (_json.JSONDecodeError):
|
||||
logger = utils.get_yf_logger()
|
||||
except _json.JSONDecodeError:
|
||||
logger.error(f"{self.ticker}: Failed to retrieve the news and received faulty response instead.")
|
||||
data = {}
|
||||
|
||||
# parse news
|
||||
self._news = data.get("news", [])
|
||||
news = data.get("data", {}).get("tickerStream", {}).get("stream", [])
|
||||
|
||||
self._news = [article for article in news if not article.get('ad', [])]
|
||||
return self._news
|
||||
|
||||
@utils.log_indent_decorator
|
||||
@@ -570,85 +592,62 @@ class TickerBase:
|
||||
Returns:
|
||||
pd.DataFrame
|
||||
"""
|
||||
if self._earnings_dates and limit in self._earnings_dates:
|
||||
return self._earnings_dates[limit]
|
||||
|
||||
logger = utils.get_yf_logger()
|
||||
clamped_limit = min(limit, 100) # YF caps at 100, don't go higher
|
||||
|
||||
page_size = min(limit, 100) # YF caps at 100, don't go higher
|
||||
page_offset = 0
|
||||
dates = None
|
||||
while True:
|
||||
url = f"{_ROOT_URL_}/calendar/earnings?symbol={self.ticker}&offset={page_offset}&size={page_size}"
|
||||
data = self._data.cache_get(url=url, proxy=proxy).text
|
||||
if self._earnings_dates and clamped_limit in self._earnings_dates:
|
||||
return self._earnings_dates[clamped_limit]
|
||||
|
||||
if "Will be right back" in data:
|
||||
raise RuntimeError("*** YAHOO! FINANCE IS CURRENTLY DOWN! ***\n"
|
||||
"Our engineers are working quickly to resolve "
|
||||
"the issue. Thank you for your patience.")
|
||||
# Fetch data
|
||||
url = f"{_QUERY1_URL_}/v1/finance/visualization"
|
||||
params = {"lang": "en-US", "region": "US"}
|
||||
body = {
|
||||
"size": clamped_limit,
|
||||
"query": {
|
||||
"operator": "and",
|
||||
"operands": [
|
||||
{"operator": "eq", "operands": ["ticker", self.ticker]},
|
||||
{"operator": "eq", "operands": ["eventtype", "2"]}
|
||||
]
|
||||
},
|
||||
"sortField": "startdatetime",
|
||||
"sortType": "DESC",
|
||||
"entityIdType": "earnings",
|
||||
"includeFields": ["startdatetime", "timeZoneShortName", "epsestimate", "epsactual", "epssurprisepct"]
|
||||
}
|
||||
response = self._data.post(url, params=params, body=body, proxy=proxy)
|
||||
json_data = response.json()
|
||||
|
||||
try:
|
||||
data = pd.read_html(StringIO(data))[0]
|
||||
except ValueError:
|
||||
if page_offset == 0:
|
||||
# Should not fail on first page
|
||||
if "Showing Earnings for:" in data:
|
||||
# Actually YF was successful, problem is company doesn't have earnings history
|
||||
dates = utils.empty_earnings_dates_df()
|
||||
break
|
||||
if dates is None:
|
||||
dates = data
|
||||
else:
|
||||
dates = pd.concat([dates, data], axis=0)
|
||||
# Extract data
|
||||
columns = [row['label'] for row in json_data['finance']['result'][0]['documents'][0]['columns']]
|
||||
rows = json_data['finance']['result'][0]['documents'][0]['rows']
|
||||
df = pd.DataFrame(rows, columns=columns)
|
||||
|
||||
page_offset += page_size
|
||||
# got less data then we asked for or already fetched all we requested, no need to fetch more pages
|
||||
if len(data) < page_size or len(dates) >= limit:
|
||||
dates = dates.iloc[:limit]
|
||||
break
|
||||
else:
|
||||
# do not fetch more than needed next time
|
||||
page_size = min(limit - len(dates), page_size)
|
||||
|
||||
if dates is None or dates.shape[0] == 0:
|
||||
if df.empty:
|
||||
_exception = YFEarningsDateMissing(self.ticker)
|
||||
err_msg = str(_exception)
|
||||
logger.error(f'{self.ticker}: {err_msg}')
|
||||
return None
|
||||
dates = dates.reset_index(drop=True)
|
||||
|
||||
# Drop redundant columns
|
||||
dates = dates.drop(["Symbol", "Company"], axis=1)
|
||||
# Calculate earnings date
|
||||
df['Earnings Date'] = pd.to_datetime(df['Event Start Date']).dt.normalize()
|
||||
tz = self._get_ticker_tz(proxy=proxy, timeout=30)
|
||||
if df['Earnings Date'].dt.tz is None:
|
||||
df['Earnings Date'] = df['Earnings Date'].dt.tz_localize(tz)
|
||||
else:
|
||||
df['Earnings Date'] = df['Earnings Date'].dt.tz_convert(tz)
|
||||
|
||||
# Convert types
|
||||
for cn in ["EPS Estimate", "Reported EPS", "Surprise(%)"]:
|
||||
dates.loc[dates[cn] == '-', cn] = float("nan")
|
||||
dates[cn] = dates[cn].astype(float)
|
||||
columns_to_update = ['Surprise (%)', 'EPS Estimate', 'Reported EPS']
|
||||
df[columns_to_update] = df[columns_to_update].astype('float64').replace(0.0, np.nan)
|
||||
|
||||
# Convert % to range 0->1:
|
||||
dates["Surprise(%)"] *= 0.01
|
||||
# Format the dataframe
|
||||
df.drop(['Event Start Date', 'Timezone short name'], axis=1, inplace=True)
|
||||
df.set_index('Earnings Date', inplace=True)
|
||||
df.rename(columns={'Surprise (%)': 'Surprise(%)'}, inplace=True) # Compatibility
|
||||
|
||||
# Parse earnings date string
|
||||
cn = "Earnings Date"
|
||||
# - remove AM/PM and timezone from date string
|
||||
tzinfo = dates[cn].str.extract('([AP]M[a-zA-Z]*)$')
|
||||
dates[cn] = dates[cn].replace(' [AP]M[a-zA-Z]*$', '', regex=True)
|
||||
# - split AM/PM from timezone
|
||||
tzinfo = tzinfo[0].str.extract('([AP]M)([a-zA-Z]*)', expand=True)
|
||||
tzinfo.columns = ["AM/PM", "TZ"]
|
||||
# - combine and parse
|
||||
dates[cn] = dates[cn] + ' ' + tzinfo["AM/PM"]
|
||||
dates[cn] = pd.to_datetime(dates[cn], format="%b %d, %Y, %I %p")
|
||||
# - instead of attempting decoding of ambiguous timezone abbreviation, just use 'info':
|
||||
self._quote.proxy = proxy or self.proxy
|
||||
tz = self._get_ticker_tz(proxy=proxy, timeout=30)
|
||||
dates[cn] = dates[cn].dt.tz_localize(tz)
|
||||
|
||||
dates = dates.set_index("Earnings Date")
|
||||
|
||||
self._earnings_dates[limit] = dates
|
||||
|
||||
return dates
|
||||
self._earnings_dates[clamped_limit] = df
|
||||
return df
|
||||
|
||||
def get_history_metadata(self, proxy=None) -> dict:
|
||||
return self._lazy_load_price_history().get_history_metadata(proxy)
|
||||
@@ -657,4 +656,4 @@ class TickerBase:
|
||||
if not self._funds_data:
|
||||
self._funds_data = FundsData(self._data, self.ticker)
|
||||
|
||||
return self._funds_data
|
||||
return self._funds_data
|
||||
|
||||
@@ -305,21 +305,92 @@ SECTOR_INDUSTY_MAPPING = {
|
||||
'utilities-independent-power-producers',
|
||||
'utilities-regulated-water'}
|
||||
}
|
||||
|
||||
def merge_two_level_dicts(dict1, dict2):
|
||||
result = dict1.copy()
|
||||
for key, value in dict2.items():
|
||||
if key in result:
|
||||
# If both are sets, merge them
|
||||
if isinstance(value, set) and isinstance(result[key], set):
|
||||
result[key] = result[key] | value
|
||||
# If both are dicts, merge their contents
|
||||
elif isinstance(value, dict) and isinstance(result[key], dict):
|
||||
result[key] = {
|
||||
k: (result[key].get(k, set()) | v if isinstance(v, set)
|
||||
else v) if k in result[key]
|
||||
else v
|
||||
for k, v in value.items()
|
||||
}
|
||||
else:
|
||||
result[key] = value
|
||||
return result
|
||||
|
||||
EQUITY_SCREENER_EQ_MAP = {
|
||||
"region": {
|
||||
"za", "ve", "vn", "us", "tw", "th", "tr", "sr", "sg", "sa", "se", "ru", "ro", "qa", "pt", "pk", "pl",
|
||||
"ph", "nz", "nl", "mx", "pe", "no", "my", "lv", "lt", "kw", "jp", "is", "il", "lk", "kr", "it", "in",
|
||||
"ie", "hu", "id", "hk", "gb", "fi", "eg", "dk", "gr", "fr", "es", "ee", "de", "cz", "cl", "ca", "be",
|
||||
"at", "cn", "br", "au", "ar", "ch"
|
||||
"exchange": {
|
||||
'ar': {'BUE'},
|
||||
'at': {'VIE'},
|
||||
'au': {'ASX'},
|
||||
'be': {'BRU'},
|
||||
'br': {'SAO'},
|
||||
'ca': {'CNQ', 'NEO', 'TOR', 'VAN'},
|
||||
'ch': {'EBS'},
|
||||
'cl': {'SGO'},
|
||||
'cn': {'SHH', 'SHZ'},
|
||||
'co': {'BVC'},
|
||||
'cz': {'PRA'},
|
||||
'de': {'BER', 'DUS', 'FRA', 'HAM', 'GER', 'MUN', 'STU'},
|
||||
'dk': {'CPH'},
|
||||
'ee': {'TAL'},
|
||||
'eg': {'CAI'},
|
||||
'es': {'MCE'},
|
||||
'fi': {'HEL'},
|
||||
'fr': {'PAR'},
|
||||
'gb': {'AQS', 'IOB', 'LSE'},
|
||||
'gr': {'ATH'},
|
||||
'hk': {'HKG'},
|
||||
'hu': {'BUD'},
|
||||
'id': {'JKT'},
|
||||
'ie': {'ISE'},
|
||||
'il': {'TLV'},
|
||||
'in': {'BSE', 'NSI'},
|
||||
'is': {'ICE'},
|
||||
'it': {'MIL'},
|
||||
'jp': {'FKA', 'JPX', 'SAP'},
|
||||
'kr': {'KOE', 'KSC'},
|
||||
'kw': {'KUW'},
|
||||
'lk': {},
|
||||
'lt': {'LIT'},
|
||||
'lv': {'RIS'},
|
||||
'mx': {'MEX'},
|
||||
'my': {'KLS'},
|
||||
'nl': {'AMS'},
|
||||
'no': {'OSL'},
|
||||
'nz': {'NZE'},
|
||||
'pe': {},
|
||||
'ph': {'PHP', 'PHS'},
|
||||
'pk': {},
|
||||
'pl': {'WSE'},
|
||||
'pt': {'LIS'},
|
||||
'qa': {'DOH'},
|
||||
'ro': {'BVB'},
|
||||
'ru': {},
|
||||
'sa': {'SAU'},
|
||||
'se': {'STO'},
|
||||
'sg': {'SES'},
|
||||
'sr': {},
|
||||
'th': {'SET'},
|
||||
'tr': {'IST'},
|
||||
'tw': {'TAI', 'TWO'},
|
||||
'us': {'ASE', 'BTS', 'CXI', 'NCM', 'NGM', 'NMS', 'NYQ', 'OEM', 'OQB', 'OQX', 'PCX', 'PNK', 'YHD'},
|
||||
've': {'CCS'},
|
||||
'vn': {},
|
||||
'za': {'JNB'}
|
||||
},
|
||||
"sector": {
|
||||
"Basic Materials", "Industrials", "Communication Services", "Healthcare",
|
||||
"Real Estate", "Technology", "Energy", "Utilities", "Financial Services",
|
||||
"Consumer Defensive", "Consumer Cyclical"
|
||||
},
|
||||
"exchanges": {
|
||||
"NMS", "NAS", "YHD", "NYQ", "NGM", "NCM", "BSE"
|
||||
},
|
||||
"peer_group": {
|
||||
"US Fund Equity Energy",
|
||||
"US CE Convertibles",
|
||||
@@ -426,20 +497,42 @@ EQUITY_SCREENER_EQ_MAP = {
|
||||
"Banks"
|
||||
}
|
||||
}
|
||||
EQUITY_SCREENER_EQ_MAP['region'] = EQUITY_SCREENER_EQ_MAP['exchange'].keys()
|
||||
ordered_keys = ['region'] + [k for k in EQUITY_SCREENER_EQ_MAP.keys() if k != 'region']
|
||||
EQUITY_SCREENER_EQ_MAP = {k:EQUITY_SCREENER_EQ_MAP[k] for k in ordered_keys}
|
||||
FUND_SCREENER_EQ_MAP = {
|
||||
"exchange": {
|
||||
'us': {'NAS'}
|
||||
}
|
||||
}
|
||||
COMMON_SCREENER_FIELDS = {
|
||||
"price":{
|
||||
"eodprice",
|
||||
"intradaypricechange",
|
||||
"intradayprice"
|
||||
},
|
||||
"eq_fields": {
|
||||
"exchange"},
|
||||
}
|
||||
FUND_SCREENER_FIELDS = {
|
||||
"eq_fields": {
|
||||
"categoryname",
|
||||
"performanceratingoverall",
|
||||
"initialinvestment",
|
||||
"annualreturnnavy1categoryrank",
|
||||
"riskratingoverall"}
|
||||
}
|
||||
FUND_SCREENER_FIELDS = merge_two_level_dicts(FUND_SCREENER_FIELDS, COMMON_SCREENER_FIELDS)
|
||||
EQUITY_SCREENER_FIELDS = {
|
||||
"eq_fields": {
|
||||
"region",
|
||||
"sector",
|
||||
"peer_group",
|
||||
"exchanges"},
|
||||
"peer_group"},
|
||||
"price":{
|
||||
"eodprice",
|
||||
"intradaypricechange",
|
||||
"lastclosemarketcap.lasttwelvemonths",
|
||||
"percentchange",
|
||||
"lastclose52weekhigh.lasttwelvemonths",
|
||||
"fiftytwowkpercentchange",
|
||||
"intradayprice",
|
||||
"lastclose52weeklow.lasttwelvemonths",
|
||||
"intradaymarketcap"},
|
||||
"trading":{
|
||||
@@ -530,21 +623,4 @@ EQUITY_SCREENER_FIELDS = {
|
||||
"social_score",
|
||||
"highest_controversy"}
|
||||
}
|
||||
|
||||
PREDEFINED_SCREENER_BODY_MAP = {
|
||||
'aggressive_small_caps': {"offset":0,"size":25,"sortField":"eodvolume","sortType":"desc","quoteType":"equity","query":{"operator":"and","operands":[{"operator":"or","operands":[{"operator":"eq","operands":["exchange","NMS"]},{"operator":"eq","operands":["exchange","NYQ"]}]},{"operator":"or","operands":[{"operator":"LT","operands":["epsgrowth.lasttwelvemonths",15]}]}]},"userId":"","userIdType":"guid"},
|
||||
'day_gainers': {"offset":0,"size":25,"sortField":"percentchange","sortType":"DESC","quoteType":"EQUITY","query":{"operator":"AND","operands":[{"operator":"gt","operands":["percentchange",3]},{"operator":"eq","operands":["region","us"]},{"operator":"or","operands":[{"operator":"BTWN","operands":["intradaymarketcap",2000000000,10000000000]},{"operator":"BTWN","operands":["intradaymarketcap",10000000000,100000000000]},{"operator":"GT","operands":["intradaymarketcap",100000000000]}]},{"operator":"gte","operands":["intradayprice",5]},{"operator":"gt","operands":["dayvolume",15000]}]},"userId":"","userIdType":"guid"},
|
||||
'day_losers': {"offset":0,"size":25,"sortField":"percentchange","sortType":"ASC","quoteType":"EQUITY","query":{"operator":"AND","operands":[{"operator":"lt","operands":["percentchange",-2.5]},{"operator":"eq","operands":["region","us"]},{"operator":"or","operands":[{"operator":"BTWN","operands":["intradaymarketcap",2000000000,10000000000]},{"operator":"BTWN","operands":["intradaymarketcap",10000000000,100000000000]},{"operator":"GT","operands":["intradaymarketcap",100000000000]}]},{"operator":"gte","operands":["intradayprice",5]},{"operator":"gt","operands":["dayvolume",20000]}]},"userId":"","userIdType":"guid"},
|
||||
'growth_technology_stocks': {"offset":0,"size":25,"sortField":"eodvolume","sortType":"desc","quoteType":"equity","query":{"operator":"and","operands":[{"operator":"or","operands":[{"operator":"BTWN","operands":["quarterlyrevenuegrowth.quarterly",50,100]},{"operator":"GT","operands":["quarterlyrevenuegrowth.quarterly",100]},{"operator":"BTWN","operands":["quarterlyrevenuegrowth.quarterly",25,50]}]},{"operator":"or","operands":[{"operator":"BTWN","operands":["epsgrowth.lasttwelvemonths",25,50]},{"operator":"BTWN","operands":["epsgrowth.lasttwelvemonths",50,100]},{"operator":"GT","operands":["epsgrowth.lasttwelvemonths",100]}]},{"operator":"eq","operands":["sector","Technology"]},{"operator":"or","operands":[{"operator":"eq","operands":["exchange","NMS"]},{"operator":"eq","operands":["exchange","NYQ"]}]}]},"userId":"","userIdType":"guid"},
|
||||
'most_actives': {"offset":0,"size":25,"sortField":"dayvolume","sortType":"DESC","quoteType":"EQUITY","query":{"operator":"AND","operands":[{"operator":"eq","operands":["region","us"]},{"operator":"or","operands":[{"operator":"BTWN","operands":["intradaymarketcap",10000000000,100000000000]},{"operator":"GT","operands":["intradaymarketcap",100000000000]},{"operator":"BTWN","operands":["intradaymarketcap",2000000000,10000000000]}]},{"operator":"gt","operands":["dayvolume",5000000]}]},"userId":"","userIdType":"guid"},
|
||||
'most_shorted_stocks': {"size":25,"offset":0,"sortField":"short_percentage_of_shares_outstanding.value","sortType":"DESC","quoteType":"EQUITY","topOperator":"AND","query":{"operator":"AND","operands":[{"operator":"or","operands":[{"operator":"EQ","operands":["region","us"]}]},{"operator":"gt","operands":["intradayprice",1]},{"operator":"gt","operands":["avgdailyvol3m",200000]}]},"userId":"","userIdType":"guid"},
|
||||
'small_cap_gainers': {"offset":0,"size":25,"sortField":"eodvolume","sortType":"desc","quoteType":"equity","query":{"operator":"and","operands":[{"operator":"lt","operands":["intradaymarketcap",2000000000]},{"operator":"or","operands":[{"operator":"eq","operands":["exchange","NMS"]},{"operator":"eq","operands":["exchange","NYQ"]}]}]},"userId":"","userIdType":"guid"},
|
||||
'undervalued_growth_stocks': {"offset":0,"size":25,"sortType":"DESC","sortField":"eodvolume","quoteType":"EQUITY","query":{"operator":"and","operands":[{"operator":"or","operands":[{"operator":"BTWN","operands":["peratio.lasttwelvemonths",0,20]}]},{"operator":"or","operands":[{"operator":"LT","operands":["pegratio_5y",1]}]},{"operator":"or","operands":[{"operator":"BTWN","operands":["epsgrowth.lasttwelvemonths",25,50]},{"operator":"BTWN","operands":["epsgrowth.lasttwelvemonths",50,100]},{"operator":"GT","operands":["epsgrowth.lasttwelvemonths",100]}]},{"operator":"or","operands":[{"operator":"eq","operands":["exchange","NMS"]},{"operator":"eq","operands":["exchange","NYQ"]}]}]},"userId":"","userIdType":"guid"},
|
||||
'undervalued_large_caps': {"offset":0,"size":25,"sortField":"eodvolume","sortType":"desc","quoteType":"equity","query":{"operator":"and","operands":[{"operator":"or","operands":[{"operator":"BTWN","operands":["peratio.lasttwelvemonths",0,20]}]},{"operator":"lt","operands":["pegratio_5y",1]},{"operator":"btwn","operands":["intradaymarketcap",10000000000,100000000000]},{"operator":"or","operands":[{"operator":"eq","operands":["exchange","NMS"]},{"operator":"eq","operands":["exchange","NYQ"]}]}]},"userId":"","userIdType":"guid"},
|
||||
'conservative_foreign_funds': {"offset":0,"size":25,"sortType":"DESC","sortField":"fundnetassets","quoteType":"MUTUALFUND","query":{"operator":"and","operands":[{"operator":"or","operands":[{"operator":"EQ","operands":["categoryname","Foreign Large Value"]},{"operator":"EQ","operands":["categoryname","Foreign Large Blend"]},{"operator":"EQ","operands":["categoryname","Foreign Large Growth"]},{"operator":"EQ","operands":["categoryname","Foreign Small/Mid Growth"]},{"operator":"EQ","operands":["categoryname","Foreign Large Blend"]},{"operator":"EQ","operands":["categoryname","Foreign Small/Mid Blend"]},{"operator":"EQ","operands":["categoryname","Foreign Small/Mid Value"]},{"operator":"EQ","operands":["categoryname","Foreign Small/Mid Blend"]},{"operator":"EQ","operands":["categoryname","Foreign Small/Mid Value"]},{"operator":"EQ","operands":["categoryname","Foreign Small/Mid Blend"]},{"operator":"EQ","operands":["categoryname","Foreign Small/Mid Value"]},{"operator":"EQ","operands":["categoryname","Foreign Small/Mid Blend"]},{"operator":"EQ","operands":["categoryname","Foreign Small/Mid Value"]}]},{"operator":"or","operands":[{"operator":"EQ","operands":["performanceratingoverall",4]},{"operator":"EQ","operands":["performanceratingoverall",5]}]},{"operator":"lt","operands":["initialinvestment",100001]},{"operator":"lt","operands":["annualreturnnavy1categoryrank",50]},{"operator":"or","operands":[{"operator":"EQ","operands":["riskratingoverall",1]},{"operator":"EQ","operands":["riskratingoverall",3]},{"operator":"EQ","operands":["riskratingoverall",2]}]},{"operator":"or","operands":[{"operator":"eq","operands":["exchange","NAS"]}]}]},"userId":"","userIdType":"guid"},
|
||||
'high_yield_bond': {"offset":0,"size":25,"sortType":"DESC","sortField":"fundnetassets","quoteType":"MUTUALFUND","query":{"operator":"and","operands":[{"operator":"or","operands":[{"operator":"EQ","operands":["performanceratingoverall",4]},{"operator":"EQ","operands":["performanceratingoverall",5]}]},{"operator":"lt","operands":["initialinvestment",100001]},{"operator":"lt","operands":["annualreturnnavy1categoryrank",50]},{"operator":"or","operands":[{"operator":"EQ","operands":["riskratingoverall",1]},{"operator":"EQ","operands":["riskratingoverall",3]},{"operator":"EQ","operands":["riskratingoverall",2]}]},{"operator":"or","operands":[{"operator":"EQ","operands":["categoryname","High Yield Bond"]}]},{"operator":"or","operands":[{"operator":"eq","operands":["exchange","NAS"]}]}]},"userId":"","userIdType":"guid"},
|
||||
'portfolio_anchors': {"offset":0,"size":25,"sortType":"DESC","sortField":"fundnetassets","quoteType":"MUTUALFUND","query":{"operator":"and","operands":[{"operator":"or","operands":[{"operator":"EQ","operands":["categoryname","Large Blend"]}]},{"operator":"or","operands":[{"operator":"EQ","operands":["performanceratingoverall",4]},{"operator":"EQ","operands":["performanceratingoverall",5]}]},{"operator":"lt","operands":["initialinvestment",100001]},{"operator":"lt","operands":["annualreturnnavy1categoryrank",50]},{"operator":"or","operands":[{"operator":"eq","operands":["exchange","NAS"]}]}]},"userId":"","userIdType":"guid"},
|
||||
'solid_large_growth_funds': {"offset":0,"size":25,"sortType":"DESC","sortField":"fundnetassets","quoteType":"MUTUALFUND","query":{"operator":"and","operands":[{"operator":"or","operands":[{"operator":"EQ","operands":["categoryname","Large Growth"]}]},{"operator":"or","operands":[{"operator":"EQ","operands":["performanceratingoverall",5]},{"operator":"EQ","operands":["performanceratingoverall",4]}]},{"operator":"lt","operands":["initialinvestment",100001]},{"operator":"lt","operands":["annualreturnnavy1categoryrank",50]},{"operator":"or","operands":[{"operator":"eq","operands":["exchange","NAS"]}]}]},"userId":"","userIdType":"guid"},
|
||||
'solid_midcap_growth_funds': {"offset":0,"size":25,"sortType":"DESC","sortField":"fundnetassets","quoteType":"MUTUALFUND","query":{"operator":"and","operands":[{"operator":"or","operands":[{"operator":"EQ","operands":["categoryname","Mid-Cap Growth"]}]},{"operator":"or","operands":[{"operator":"EQ","operands":["performanceratingoverall",5]},{"operator":"EQ","operands":["performanceratingoverall",4]}]},{"operator":"lt","operands":["initialinvestment",100001]},{"operator":"lt","operands":["annualreturnnavy1categoryrank",50]},{"operator":"or","operands":[{"operator":"eq","operands":["exchange","NAS"]}]}]},"userId":"","userIdType":"guid"},
|
||||
'top_mutual_funds': {"offset":0,"size":25,"sortType":"DESC","sortField":"percentchange","quoteType":"MUTUALFUND","query":{"operator":"and","operands":[{"operator":"gt","operands":["intradayprice",15]},{"operator":"or","operands":[{"operator":"EQ","operands":["performanceratingoverall",5]},{"operator":"EQ","operands":["performanceratingoverall",4]}]},{"operator":"gt","operands":["initialinvestment",1000]},{"operator":"or","operands":[{"operator":"eq","operands":["exchange","NAS"]}]}]},"userId":"","userIdType":"guid"}
|
||||
}
|
||||
EQUITY_SCREENER_FIELDS = merge_two_level_dicts(EQUITY_SCREENER_FIELDS, COMMON_SCREENER_FIELDS)
|
||||
|
||||
@@ -10,6 +10,8 @@ from frozendict import frozendict
|
||||
from . import utils, cache
|
||||
import threading
|
||||
|
||||
from .exceptions import YFRateLimitError
|
||||
|
||||
cache_maxsize = 64
|
||||
|
||||
|
||||
@@ -229,11 +231,16 @@ class YfData(metaclass=SingletonMeta):
|
||||
'timeout': timeout}
|
||||
|
||||
get_args = {**base_args, 'url': 'https://guce.yahoo.com/consent'}
|
||||
if self._session_is_caching:
|
||||
get_args['expire_after'] = self._expire_after
|
||||
response = self._session.get(**get_args)
|
||||
else:
|
||||
response = self._session.get(**get_args)
|
||||
try:
|
||||
if self._session_is_caching:
|
||||
get_args['expire_after'] = self._expire_after
|
||||
response = self._session.get(**get_args)
|
||||
else:
|
||||
response = self._session.get(**get_args)
|
||||
except requests.exceptions.ChunkedEncodingError:
|
||||
# No idea why happens, but handle nicely so can switch to other cookie method.
|
||||
utils.get_yf_logger().debug('_get_cookie_csrf() encountering requests.exceptions.ChunkedEncodingError, aborting')
|
||||
return False
|
||||
|
||||
soup = BeautifulSoup(response.content, 'html.parser')
|
||||
csrfTokenInput = soup.find('input', attrs={'name': 'csrfToken'})
|
||||
@@ -262,14 +269,18 @@ class YfData(metaclass=SingletonMeta):
|
||||
get_args = {**base_args,
|
||||
'url': f'https://guce.yahoo.com/copyConsent?sessionId={sessionId}',
|
||||
'data': data}
|
||||
if self._session_is_caching:
|
||||
post_args['expire_after'] = self._expire_after
|
||||
get_args['expire_after'] = self._expire_after
|
||||
self._session.post(**post_args)
|
||||
self._session.get(**get_args)
|
||||
else:
|
||||
self._session.post(**post_args)
|
||||
self._session.get(**get_args)
|
||||
try:
|
||||
if self._session_is_caching:
|
||||
post_args['expire_after'] = self._expire_after
|
||||
get_args['expire_after'] = self._expire_after
|
||||
self._session.post(**post_args)
|
||||
self._session.get(**get_args)
|
||||
else:
|
||||
self._session.post(**post_args)
|
||||
self._session.get(**get_args)
|
||||
except requests.exceptions.ChunkedEncodingError:
|
||||
# No idea why happens, but handle nicely so can switch to other cookie method.
|
||||
utils.get_yf_logger().debug('_get_cookie_csrf() encountering requests.exceptions.ChunkedEncodingError, aborting')
|
||||
self._cookie = True
|
||||
self._save_session_cookies()
|
||||
return True
|
||||
@@ -390,6 +401,10 @@ class YfData(metaclass=SingletonMeta):
|
||||
response = request_method(**request_args)
|
||||
utils.get_yf_logger().debug(f'response code={response.status_code}')
|
||||
|
||||
# Raise exception if rate limited
|
||||
if response.status_code == 429:
|
||||
raise YFRateLimitError()
|
||||
|
||||
return response
|
||||
|
||||
@lru_cache_freezeargs
|
||||
|
||||
100
yfinance/domain/market.py
Normal file
100
yfinance/domain/market.py
Normal file
@@ -0,0 +1,100 @@
|
||||
import datetime as dt
|
||||
|
||||
from ..data import YfData
|
||||
from ..data import utils
|
||||
from ..const import _QUERY1_URL_
|
||||
import json as _json
|
||||
|
||||
class Market:
|
||||
def __init__(self, market:'str', session=None, proxy=None, timeout=30):
|
||||
self.market = market
|
||||
self.session = session
|
||||
self.proxy = proxy
|
||||
self.timeout = timeout
|
||||
|
||||
self._data = YfData(session=self.session)
|
||||
self._logger = utils.get_yf_logger()
|
||||
|
||||
self._status = None
|
||||
self._summary = None
|
||||
|
||||
def _fetch_json(self, url, params):
|
||||
data = self._data.cache_get(url=url, params=params, proxy=self.proxy, timeout=self.timeout)
|
||||
if data is None or "Will be right back" in data.text:
|
||||
raise RuntimeError("*** YAHOO! FINANCE IS CURRENTLY DOWN! ***\n"
|
||||
"Our engineers are working quickly to resolve "
|
||||
"the issue. Thank you for your patience.")
|
||||
try:
|
||||
return data.json()
|
||||
except _json.JSONDecodeError:
|
||||
self._logger.error(f"{self.market}: Failed to retrieve market data and recieved faulty data.")
|
||||
return {}
|
||||
|
||||
def _parse_data(self):
|
||||
# Fetch both to ensure they are at the same time
|
||||
if (self._status is not None) and (self._summary is not None):
|
||||
return
|
||||
|
||||
self._logger.debug(f"{self.market}: Parsing market data")
|
||||
|
||||
# Summary
|
||||
|
||||
summary_url = f"{_QUERY1_URL_}/v6/finance/quote/marketSummary"
|
||||
summary_fields = ["shortName", "regularMarketPrice", "regularMarketChange", "regularMarketChangePercent"]
|
||||
summary_params = {
|
||||
"fields": ",".join(summary_fields),
|
||||
"formatted": False,
|
||||
"lang": "en-US",
|
||||
"market": self.market
|
||||
}
|
||||
|
||||
status_url = f"{_QUERY1_URL_}/v6/finance/markettime"
|
||||
status_params = {
|
||||
"formatted": True,
|
||||
"key": "finance",
|
||||
"lang": "en-US",
|
||||
"market": self.market
|
||||
}
|
||||
|
||||
self._summary = self._fetch_json(summary_url, summary_params)
|
||||
self._status = self._fetch_json(status_url, status_params)
|
||||
|
||||
try:
|
||||
self._summary = self._summary['marketSummaryResponse']['result']
|
||||
self._summary = {x['exchange']:x for x in self._summary}
|
||||
except Exception as e:
|
||||
self._logger.error(f"{self.market}: Failed to parse market summary")
|
||||
self._logger.debug(f"{type(e)}: {e}")
|
||||
|
||||
|
||||
try:
|
||||
# Unpack
|
||||
self._status = self._status['finance']['marketTimes'][0]['marketTime'][0]
|
||||
self._status['timezone'] = self._status['timezone'][0]
|
||||
del self._status['time'] # redundant
|
||||
try:
|
||||
self._status.update({
|
||||
"open": dt.datetime.fromisoformat(self._status["open"]),
|
||||
"close": dt.datetime.fromisoformat(self._status["close"]),
|
||||
"tz": dt.timezone(dt.timedelta(hours=int(self._status["timezone"]["gmtoffset"]))/1000, self._status["timezone"]["short"])
|
||||
})
|
||||
except Exception as e:
|
||||
self._logger.error(f"{self.market}: Failed to update market status")
|
||||
self._logger.debug(f"{type(e)}: {e}")
|
||||
except Exception as e:
|
||||
self._logger.error(f"{self.market}: Failed to parse market status")
|
||||
self._logger.debug(f"{type(e)}: {e}")
|
||||
|
||||
|
||||
|
||||
|
||||
@property
|
||||
def status(self):
|
||||
self._parse_data()
|
||||
return self._status
|
||||
|
||||
|
||||
@property
|
||||
def summary(self):
|
||||
self._parse_data()
|
||||
return self._summary
|
||||
@@ -44,4 +44,10 @@ class YFInvalidPeriodError(YFException):
|
||||
self.ticker = ticker
|
||||
self.invalid_period = invalid_period
|
||||
self.valid_ranges = valid_ranges
|
||||
super().__init__(f"{self.ticker}: Period '{invalid_period}' is invalid, must be one of {valid_ranges}")
|
||||
super().__init__(f"{self.ticker}: Period '{invalid_period}' is invalid, "
|
||||
f"must be of the format {valid_ranges}, etc.")
|
||||
|
||||
|
||||
class YFRateLimitError(YFException):
|
||||
def __init__(self):
|
||||
super().__init__("Too Many Requests. Rate limited. Try after a while.")
|
||||
|
||||
@@ -36,7 +36,7 @@ from . import shared
|
||||
|
||||
@utils.log_indent_decorator
|
||||
def download(tickers, start=None, end=None, actions=False, threads=True,
|
||||
ignore_tz=None, group_by='column', auto_adjust=False, back_adjust=False,
|
||||
ignore_tz=None, group_by='column', auto_adjust=None, back_adjust=False,
|
||||
repair=False, keepna=False, progress=True, period="max", interval="1d",
|
||||
prepost=False, proxy=None, rounding=False, timeout=10, session=None,
|
||||
multi_level_index=True) -> Union[_pd.DataFrame, None]:
|
||||
@@ -65,7 +65,7 @@ def download(tickers, start=None, end=None, actions=False, threads=True,
|
||||
Include Pre and Post market data in results?
|
||||
Default is False
|
||||
auto_adjust: bool
|
||||
Adjust all OHLC automatically? Default is False
|
||||
Adjust all OHLC automatically? Default is True
|
||||
repair: bool
|
||||
Detect currency unit 100x mixups and attempt repair
|
||||
Default is False
|
||||
@@ -93,6 +93,11 @@ def download(tickers, start=None, end=None, actions=False, threads=True,
|
||||
"""
|
||||
logger = utils.get_yf_logger()
|
||||
|
||||
if auto_adjust is None:
|
||||
# Warn users that default has changed to True
|
||||
utils.print_once("YF.download() has changed argument auto_adjust default to True")
|
||||
auto_adjust = True
|
||||
|
||||
if logger.isEnabledFor(logging.DEBUG):
|
||||
if threads:
|
||||
# With DEBUG, each thread generates a lot of log messages.
|
||||
@@ -106,7 +111,7 @@ def download(tickers, start=None, end=None, actions=False, threads=True,
|
||||
|
||||
if ignore_tz is None:
|
||||
# Set default value depending on interval
|
||||
if interval[1:] in ['m', 'h']:
|
||||
if interval[-1] in ['m', 'h']:
|
||||
# Intraday
|
||||
ignore_tz = False
|
||||
else:
|
||||
@@ -180,7 +185,7 @@ def download(tickers, start=None, end=None, actions=False, threads=True,
|
||||
errors = {}
|
||||
for ticker in shared._ERRORS:
|
||||
err = shared._ERRORS[ticker]
|
||||
err = err.replace(f'{ticker}', '%ticker%')
|
||||
err = err.replace(f'${ticker}: ', '')
|
||||
if err not in errors:
|
||||
errors[err] = [ticker]
|
||||
else:
|
||||
@@ -192,7 +197,7 @@ def download(tickers, start=None, end=None, actions=False, threads=True,
|
||||
tbs = {}
|
||||
for ticker in shared._TRACEBACKS:
|
||||
tb = shared._TRACEBACKS[ticker]
|
||||
tb = tb.replace(f'{ticker}', '%ticker%')
|
||||
tb = tb.replace(f'${ticker}: ', '')
|
||||
if tb not in tbs:
|
||||
tbs[tb] = [ticker]
|
||||
else:
|
||||
|
||||
@@ -28,6 +28,50 @@ class Analysis:
|
||||
self._eps_revisions = None
|
||||
self._growth_estimates = None
|
||||
|
||||
def _get_periodic_df(self, key) -> pd.DataFrame:
|
||||
if self._earnings_trend is None:
|
||||
self._fetch_earnings_trend()
|
||||
|
||||
data = []
|
||||
for item in self._earnings_trend[:4]:
|
||||
row = {'period': item['period']}
|
||||
for k, v in item[key].items():
|
||||
if not isinstance(v, dict) or len(v) == 0:
|
||||
continue
|
||||
row[k] = v['raw']
|
||||
data.append(row)
|
||||
if len(data) == 0:
|
||||
return pd.DataFrame()
|
||||
return pd.DataFrame(data).set_index('period')
|
||||
|
||||
@property
|
||||
def earnings_estimate(self) -> pd.DataFrame:
|
||||
if self._earnings_estimate is not None:
|
||||
return self._earnings_estimate
|
||||
self._earnings_estimate = self._get_periodic_df('earningsEstimate')
|
||||
return self._earnings_estimate
|
||||
|
||||
@property
|
||||
def revenue_estimate(self) -> pd.DataFrame:
|
||||
if self._revenue_estimate is not None:
|
||||
return self._revenue_estimate
|
||||
self._revenue_estimate = self._get_periodic_df('revenueEstimate')
|
||||
return self._revenue_estimate
|
||||
|
||||
@property
|
||||
def eps_trend(self) -> pd.DataFrame:
|
||||
if self._eps_trend is not None:
|
||||
return self._eps_trend
|
||||
self._eps_trend = self._get_periodic_df('epsTrend')
|
||||
return self._eps_trend
|
||||
|
||||
@property
|
||||
def eps_revisions(self) -> pd.DataFrame:
|
||||
if self._eps_revisions is not None:
|
||||
return self._eps_revisions
|
||||
self._eps_revisions = self._get_periodic_df('epsRevisions')
|
||||
return self._eps_revisions
|
||||
|
||||
@property
|
||||
def analyst_price_targets(self) -> dict:
|
||||
if self._analyst_price_targets is not None:
|
||||
@@ -40,73 +84,17 @@ class Analysis:
|
||||
self._analyst_price_targets = {}
|
||||
return self._analyst_price_targets
|
||||
|
||||
keys = [
|
||||
('currentPrice', 'current'),
|
||||
('targetLowPrice', 'low'),
|
||||
('targetHighPrice', 'high'),
|
||||
('targetMeanPrice', 'mean'),
|
||||
('targetMedianPrice', 'median'),
|
||||
]
|
||||
result = {}
|
||||
for key, value in data.items():
|
||||
if key.startswith('target'):
|
||||
new_key = key.replace('target', '').lower().replace('price', '').strip()
|
||||
result[new_key] = value
|
||||
elif key == 'currentPrice':
|
||||
result['current'] = value
|
||||
|
||||
self._analyst_price_targets = {newKey: data.get(oldKey, None) for oldKey, newKey in keys}
|
||||
self._analyst_price_targets = result
|
||||
return self._analyst_price_targets
|
||||
|
||||
@property
|
||||
def earnings_estimate(self) -> pd.DataFrame:
|
||||
if self._earnings_estimate is not None:
|
||||
return self._earnings_estimate
|
||||
|
||||
if self._earnings_trend is None:
|
||||
self._fetch_earnings_trend()
|
||||
|
||||
data_dict = {
|
||||
'numberOfAnalysts': [],
|
||||
'avg': [],
|
||||
'low': [],
|
||||
'high': [],
|
||||
'yearAgoEps': [],
|
||||
'growth': []
|
||||
}
|
||||
periods = []
|
||||
|
||||
for item in self._earnings_trend[:4]:
|
||||
periods.append(item['period'])
|
||||
earnings_estimate = item.get('earningsEstimate', {})
|
||||
|
||||
for key in data_dict.keys():
|
||||
data_dict[key].append(earnings_estimate.get(key, {}).get('raw', None))
|
||||
|
||||
self._earnings_estimate = pd.DataFrame(data_dict, index=periods)
|
||||
return self._earnings_estimate
|
||||
|
||||
@property
|
||||
def revenue_estimate(self) -> pd.DataFrame:
|
||||
if self._revenue_estimate is not None:
|
||||
return self._revenue_estimate
|
||||
|
||||
if self._earnings_trend is None:
|
||||
self._fetch_earnings_trend()
|
||||
|
||||
data_dict = {
|
||||
'numberOfAnalysts': [],
|
||||
'avg': [],
|
||||
'low': [],
|
||||
'high': [],
|
||||
'yearAgoRevenue': [],
|
||||
'growth': []
|
||||
}
|
||||
periods = []
|
||||
|
||||
for item in self._earnings_trend[:4]:
|
||||
periods.append(item['period'])
|
||||
revenue_estimate = item.get('revenueEstimate', {})
|
||||
|
||||
for key in data_dict.keys():
|
||||
data_dict[key].append(revenue_estimate.get(key, {}).get('raw', None))
|
||||
|
||||
self._revenue_estimate = pd.DataFrame(data_dict, index=periods)
|
||||
return self._revenue_estimate
|
||||
|
||||
@property
|
||||
def earnings_history(self) -> pd.DataFrame:
|
||||
if self._earnings_history is not None:
|
||||
@@ -119,77 +107,27 @@ class Analysis:
|
||||
self._earnings_history = pd.DataFrame()
|
||||
return self._earnings_history
|
||||
|
||||
data_dict = {
|
||||
'epsEstimate': [],
|
||||
'epsActual': [],
|
||||
'epsDifference': [],
|
||||
'surprisePercent': []
|
||||
}
|
||||
quarters = []
|
||||
|
||||
rows = []
|
||||
for item in data:
|
||||
quarters.append(item.get('quarter', {}).get('fmt', None))
|
||||
row = {'quarter': item.get('quarter', {}).get('fmt', None)}
|
||||
for k, v in item.items():
|
||||
if k == 'quarter':
|
||||
continue
|
||||
if not isinstance(v, dict) or len(v) == 0:
|
||||
continue
|
||||
row[k] = v.get('raw', None)
|
||||
rows.append(row)
|
||||
if len(data) == 0:
|
||||
return pd.DataFrame()
|
||||
|
||||
for key in data_dict.keys():
|
||||
data_dict[key].append(item.get(key, {}).get('raw', None))
|
||||
|
||||
datetime_index = pd.to_datetime(quarters, format='%Y-%m-%d')
|
||||
self._earnings_history = pd.DataFrame(data_dict, index=datetime_index)
|
||||
df = pd.DataFrame(rows)
|
||||
if 'quarter' in df.columns:
|
||||
df['quarter'] = pd.to_datetime(df['quarter'], format='%Y-%m-%d')
|
||||
df.set_index('quarter', inplace=True)
|
||||
|
||||
self._earnings_history = df
|
||||
return self._earnings_history
|
||||
|
||||
@property
|
||||
def eps_trend(self) -> pd.DataFrame:
|
||||
if self._eps_trend is not None:
|
||||
return self._eps_trend
|
||||
|
||||
if self._earnings_trend is None:
|
||||
self._fetch_earnings_trend()
|
||||
|
||||
data_dict = {
|
||||
'current': [],
|
||||
'7daysAgo': [],
|
||||
'30daysAgo': [],
|
||||
'60daysAgo': [],
|
||||
'90daysAgo': []
|
||||
}
|
||||
periods = []
|
||||
|
||||
for item in self._earnings_trend[:4]:
|
||||
periods.append(item['period'])
|
||||
eps_trend = item.get('epsTrend', {})
|
||||
|
||||
for key in data_dict.keys():
|
||||
data_dict[key].append(eps_trend.get(key, {}).get('raw', None))
|
||||
|
||||
self._eps_trend = pd.DataFrame(data_dict, index=periods)
|
||||
return self._eps_trend
|
||||
|
||||
@property
|
||||
def eps_revisions(self) -> pd.DataFrame:
|
||||
if self._eps_revisions is not None:
|
||||
return self._eps_revisions
|
||||
|
||||
if self._earnings_trend is None:
|
||||
self._fetch_earnings_trend()
|
||||
|
||||
data_dict = {
|
||||
'upLast7days': [],
|
||||
'upLast30days': [],
|
||||
'downLast7days': [],
|
||||
'downLast30days': []
|
||||
}
|
||||
periods = []
|
||||
|
||||
for item in self._earnings_trend[:4]:
|
||||
periods.append(item['period'])
|
||||
eps_revisions = item.get('epsRevisions', {})
|
||||
|
||||
for key in data_dict.keys():
|
||||
data_dict[key].append(eps_revisions.get(key, {}).get('raw', None))
|
||||
|
||||
self._eps_revisions = pd.DataFrame(data_dict, index=periods)
|
||||
return self._eps_revisions
|
||||
|
||||
@property
|
||||
def growth_estimates(self) -> pd.DataFrame:
|
||||
if self._growth_estimates is not None:
|
||||
@@ -205,48 +143,26 @@ class Analysis:
|
||||
self._growth_estimates = pd.DataFrame()
|
||||
return self._growth_estimates
|
||||
|
||||
# LTG is not defined in yahoo finance front-end as at 2024-11-14.
|
||||
# But its addition is breaking the retrieval of growth estimates.
|
||||
# Also, support for 5 year seem to have dropped.
|
||||
# TODO: Revisit this change and consider permanently removing these keys.
|
||||
data_dict = {
|
||||
'0q': [],
|
||||
'+1q': [],
|
||||
'0y': [],
|
||||
'+1y': [],
|
||||
# 'LTG': [],
|
||||
# '+5y': [],
|
||||
# '-5y': []
|
||||
}
|
||||
|
||||
# make sure no column is empty
|
||||
dummy_trend = [{'period': key, 'growth': None} for key in data_dict.keys()]
|
||||
industry_trend = trends['industryTrend']['estimates'] or dummy_trend
|
||||
sector_trend = trends['sectorTrend']['estimates'] or dummy_trend
|
||||
index_trend = trends['indexTrend']['estimates'] or dummy_trend
|
||||
|
||||
data = []
|
||||
for item in self._earnings_trend:
|
||||
period = item['period']
|
||||
if period in data_dict:
|
||||
data_dict[period].append(item.get('growth', {}).get('raw', None))
|
||||
row = {'period': period, 'stockTrend': item.get('growth', {}).get('raw', None)}
|
||||
data.append(row)
|
||||
|
||||
for item in industry_trend:
|
||||
period = item['period']
|
||||
if period in data_dict:
|
||||
data_dict[period].append(item.get('growth', None))
|
||||
for trend_name, trend_info in trends.items():
|
||||
if trend_info.get('estimates'):
|
||||
for estimate in trend_info['estimates']:
|
||||
period = estimate['period']
|
||||
existing_row = next((row for row in data if row['period'] == period), None)
|
||||
if existing_row:
|
||||
existing_row[trend_name] = estimate.get('growth')
|
||||
else:
|
||||
row = {'period': period, trend_name: estimate.get('growth')}
|
||||
data.append(row)
|
||||
if len(data) == 0:
|
||||
return pd.DataFrame()
|
||||
|
||||
for item in sector_trend:
|
||||
period = item['period']
|
||||
if period in data_dict:
|
||||
data_dict[period].append(item.get('growth', None))
|
||||
|
||||
for item in index_trend:
|
||||
period = item['period']
|
||||
if period in data_dict:
|
||||
data_dict[period].append(item.get('growth', None))
|
||||
|
||||
cols = ['stock', 'industry', 'sector', 'index']
|
||||
self._growth_estimates = pd.DataFrame(data_dict, index=cols).T
|
||||
self._growth_estimates = pd.DataFrame(data).set_index('period').dropna(how='all')
|
||||
return self._growth_estimates
|
||||
|
||||
# modified version from quote.py
|
||||
|
||||
@@ -9,7 +9,7 @@ import bisect
|
||||
|
||||
from yfinance import shared, utils
|
||||
from yfinance.const import _BASE_URL_, _PRICE_COLNAMES_
|
||||
from yfinance.exceptions import YFInvalidPeriodError, YFPricesMissingError, YFTzMissingError
|
||||
from yfinance.exceptions import YFInvalidPeriodError, YFPricesMissingError, YFTzMissingError, YFRateLimitError
|
||||
|
||||
class PriceHistory:
|
||||
def __init__(self, data, ticker, tz, session=None, proxy=None):
|
||||
@@ -78,7 +78,7 @@ class PriceHistory:
|
||||
|
||||
interval_user = interval
|
||||
period_user = period
|
||||
if repair and interval in ['5d', '1wk', '1mo', '3mo']:
|
||||
if repair and interval in ["5d", "1wk", "1mo", "3mo"]:
|
||||
# Yahoo's way of adjusting mutiday intervals is fundamentally broken.
|
||||
# Have to fetch 1d, adjust, then resample.
|
||||
if interval == '5d':
|
||||
@@ -184,6 +184,9 @@ class PriceHistory:
|
||||
"the issue. Thank you for your patience.")
|
||||
|
||||
data = data.json()
|
||||
# Special case for rate limits
|
||||
except YFRateLimitError:
|
||||
raise
|
||||
except Exception:
|
||||
if raise_errors:
|
||||
raise
|
||||
@@ -229,10 +232,9 @@ class PriceHistory:
|
||||
elif "chart" not in data or data["chart"]["result"] is None or not data["chart"]["result"] or not data["chart"]["result"][0]["indicators"]["quote"][0]:
|
||||
_exception = YFPricesMissingError(self.ticker, _price_data_debug)
|
||||
fail = True
|
||||
elif period is not None and period not in self._history_metadata["validRanges"]:
|
||||
# even if timestamp is in the data, the data doesn't encompass the period requested
|
||||
# User provided a bad period. The minimum should be '1d', but sometimes Yahoo accepts '1h'.
|
||||
_exception = YFInvalidPeriodError(self.ticker, period, self._history_metadata['validRanges'])
|
||||
elif period and period not in self._history_metadata['validRanges'] and not utils.is_valid_period_format(period):
|
||||
# User provided a bad period
|
||||
_exception = YFInvalidPeriodError(self.ticker, period, ", ".join(self._history_metadata['validRanges']))
|
||||
fail = True
|
||||
|
||||
if fail:
|
||||
@@ -247,6 +249,13 @@ class PriceHistory:
|
||||
self._reconstruct_start_interval = None
|
||||
return utils.empty_df()
|
||||
|
||||
# Process custom periods
|
||||
if period and period not in self._history_metadata.get("validRanges", []):
|
||||
end = int(_time.time())
|
||||
start = _datetime.date.fromtimestamp(end)
|
||||
start -= utils._interval_to_timedelta(period)
|
||||
start -= _datetime.timedelta(days=4)
|
||||
|
||||
# parse quotes
|
||||
quotes = utils.parse_quotes(data["chart"]["result"][0])
|
||||
# Yahoo bug fix - it often appends latest price even if after end date
|
||||
@@ -1295,7 +1304,7 @@ class PriceHistory:
|
||||
|
||||
if df is None or df.empty:
|
||||
return df
|
||||
if interval != '1d':
|
||||
if interval in ['1wk', '1mo', '3mo', '1y']:
|
||||
return df
|
||||
|
||||
logger = utils.get_yf_logger()
|
||||
@@ -1605,9 +1614,9 @@ class PriceHistory:
|
||||
checks += ['adj_missing', 'adj_exceeds_div', 'div_exceeds_adj']
|
||||
|
||||
div_status_df['phantom'] = False
|
||||
phantom_proximity_threshold = _datetime.timedelta(days=7)
|
||||
phantom_proximity_threshold = _datetime.timedelta(days=17)
|
||||
f = div_status_df[['div_too_big', 'div_exceeds_adj']].any(axis=1)
|
||||
if f.any():
|
||||
if f.any() and len(div_status_df) > 1:
|
||||
# One/some of these may be phantom dividends. Clue is if another correct dividend is very close
|
||||
indices = np.where(f)[0]
|
||||
dts_to_check = div_status_df.index[f]
|
||||
@@ -1616,37 +1625,24 @@ class PriceHistory:
|
||||
div_dt = div.name
|
||||
phantom_dt = None
|
||||
if i > 0:
|
||||
prev_div = div_status_df.iloc[i-1]
|
||||
ratio1 = (div['div']/currency_divide) / prev_div['div']
|
||||
ratio2 = div['div'] / prev_div['div']
|
||||
divergence = min(abs(ratio1-1.0), abs(ratio2-1.0))
|
||||
if abs(div_dt-prev_div.name) <= phantom_proximity_threshold and not prev_div['phantom'] and divergence < 0.01:
|
||||
if prev_div.name in dts_to_check:
|
||||
# Both this and previous are anomalous, so mark smallest drop as phantom
|
||||
drop = div['drop']
|
||||
drop_prev = prev_div['drop']
|
||||
if drop > 1.5*drop_prev:
|
||||
phantom_dt = prev_div.name
|
||||
else:
|
||||
phantom_dt = div_dt
|
||||
else:
|
||||
phantom_dt = div_dt
|
||||
elif i < len(div_status_df)-1:
|
||||
next_div = div_status_df.iloc[i+1]
|
||||
ratio1 = (div['div']/currency_divide) / next_div['div']
|
||||
ratio2 = div['div'] / next_div['div']
|
||||
divergence = min(abs(ratio1-1.0), abs(ratio2-1.0))
|
||||
if abs(div_dt-next_div.name) <= phantom_proximity_threshold and divergence < 0.01:
|
||||
if next_div.name in dts_to_check:
|
||||
# Both this and previous are anomalous, so mark smallest drop as phantom
|
||||
drop = div['drop']
|
||||
drop_next = next_div['drop']
|
||||
if drop > 1.5*drop_next:
|
||||
phantom_dt = next_div.name
|
||||
else:
|
||||
phantom_dt = div_dt
|
||||
other_div = div_status_df.iloc[i-1]
|
||||
else:
|
||||
other_div = div_status_df.iloc[i+1]
|
||||
ratio1 = (div['div']/currency_divide) / other_div['div']
|
||||
ratio2 = div['div'] / other_div['div']
|
||||
divergence = min(abs(ratio1-1.0), abs(ratio2-1.0))
|
||||
if abs(div_dt-other_div.name) <= phantom_proximity_threshold and not other_div['phantom'] and divergence < 0.01:
|
||||
if other_div.name in dts_to_check:
|
||||
# Both this and previous are anomalous, so mark smallest drop as phantom
|
||||
drop = div['drop']
|
||||
drop_next = other_div['drop']
|
||||
if drop > 1.5*drop_next:
|
||||
phantom_dt = other_div.name
|
||||
else:
|
||||
phantom_dt = div_dt
|
||||
else:
|
||||
phantom_dt = div_dt
|
||||
|
||||
if phantom_dt:
|
||||
div_status_df.loc[phantom_dt, 'phantom'] = True
|
||||
for c in checks:
|
||||
@@ -1745,7 +1741,7 @@ class PriceHistory:
|
||||
lookahead_idx = bisect.bisect_left(df2.index, lookahead_date)
|
||||
lookahead_idx = min(lookahead_idx, len(df2)-1)
|
||||
# In rare cases, the price dropped 1 day before dividend (DVD.OL @ 2024-05-15)
|
||||
lookback_idx = div_idx-2 if div_idx > 1 else div_idx-1
|
||||
lookback_idx = max(0, div_idx-14)
|
||||
# Check for bad stock splits in the lookahead period -
|
||||
# if present, reduce lookahead to before.
|
||||
future_changes = df2['Close'].iloc[div_idx:lookahead_idx+1].pct_change()
|
||||
@@ -1767,8 +1763,6 @@ class PriceHistory:
|
||||
adjDeltas = x['Adj Low'].iloc[1:].to_numpy() - x['Adj Close'].iloc[:-1].to_numpy()
|
||||
adjDeltas = np.append([0.0], adjDeltas)
|
||||
x['adjDelta'] = adjDeltas
|
||||
for i in np.where(x['Dividends']>0)[0]:
|
||||
x.loc[x.index[i], 'adjDelta'] += x['Dividends'].iloc[i]*x['Adj'].iloc[i]
|
||||
deltas = x[['delta', 'adjDelta']]
|
||||
if div_pct > 0.05 and div_pct < 1.0:
|
||||
adjDiv = div * x['Adj'].iloc[0]
|
||||
@@ -1903,7 +1897,7 @@ class PriceHistory:
|
||||
pct_fail = n_fail / n
|
||||
if c == 'div_too_big':
|
||||
true_threshold = 1.0
|
||||
fals_threshold = 0.2
|
||||
fals_threshold = 0.25
|
||||
|
||||
if 'div_date_wrong' in cluster.columns and (cluster[c] == cluster['div_date_wrong']).all():
|
||||
continue
|
||||
@@ -1982,7 +1976,7 @@ class PriceHistory:
|
||||
if c == 'div_date_wrong':
|
||||
# Fine, these should be rare
|
||||
continue
|
||||
if c == 'div_pre_split':
|
||||
if c in ['div_pre_split', 'div_too_big_and_pre_split']:
|
||||
# Fine, these should be rare
|
||||
continue
|
||||
|
||||
@@ -2218,6 +2212,26 @@ class PriceHistory:
|
||||
df2_nan.loc[:enddt, 'Repaired?'] = True
|
||||
cluster.loc[dt, 'Fixed?'] = True
|
||||
|
||||
elif n_failed_checks == 3:
|
||||
if div_too_big and div_exceeds_adj and div_pre_split:
|
||||
k = 'too-big div & pre-split'
|
||||
correction = (1.0/currency_divide) * (1.0/df2['Stock Splits'].loc[dt])
|
||||
correct_div = row['div'] * correction
|
||||
df2.loc[dt, 'Dividends'] = correct_div
|
||||
|
||||
target_div_pct = row['%'] * correction
|
||||
target_adj = 1.0 - target_div_pct
|
||||
present_adj = row['present adj']
|
||||
# Also correct adjustment to match corrected dividend
|
||||
k += ' & div-adjust'
|
||||
adj_correction = target_adj / present_adj
|
||||
df2.loc[ :enddt, 'Adj Close'] *= adj_correction
|
||||
df2.loc[ :enddt, 'Repaired?'] = True
|
||||
df2_nan.loc[:enddt, 'Adj Close'] *= adj_correction
|
||||
df2_nan.loc[:enddt, 'Repaired?'] = True
|
||||
cluster.loc[dt, 'Fixed?'] = True
|
||||
div_repairs.setdefault(k, []).append(dt)
|
||||
|
||||
if cluster.empty:
|
||||
continue
|
||||
|
||||
@@ -2473,14 +2487,14 @@ class PriceHistory:
|
||||
|
||||
r = _1d_change_x / split_rcp
|
||||
f_down = _1d_change_x < 1.0 / threshold
|
||||
if f_down.any():
|
||||
# Discard where triggered by negative Adj Close after dividend
|
||||
f_neg = _1d_change_x < 0.0
|
||||
f_div = (df2['Dividends']>0).to_numpy()
|
||||
f_div_before = np.roll(f_div, 1)
|
||||
if f_down.ndim == 2:
|
||||
f_div_before = f_div_before[:, np.newaxis].repeat(f_down.shape[1], axis=1)
|
||||
f_down = f_down & ~(f_neg + f_div_before)
|
||||
# if f_down.any():
|
||||
# # Discard where triggered by negative Adj Close after dividend
|
||||
# f_neg = _1d_change_x < 0.0
|
||||
# f_div = (df2['Dividends']>0).to_numpy()
|
||||
# f_div_before = np.roll(f_div, 1)
|
||||
# if f_down.ndim == 2:
|
||||
# f_div_before = f_div_before[:, np.newaxis].repeat(f_down.shape[1], axis=1)
|
||||
# f_down = f_down & ~(f_neg + f_div_before)
|
||||
f_up = _1d_change_x > threshold
|
||||
f_up_ndims = len(f_up.shape)
|
||||
f_up_shifts = f_up if f_up_ndims==1 else f_up.any(axis=1)
|
||||
@@ -2503,7 +2517,7 @@ class PriceHistory:
|
||||
# assume false positive
|
||||
continue
|
||||
avg_vol_after = df2['Volume'].iloc[lookback:i-1].mean()
|
||||
if not np.isnan(avg_vol_after) and v/avg_vol_after < 2.0:
|
||||
if not np.isnan(avg_vol_after) and avg_vol_after > 0 and v/avg_vol_after < 2.0:
|
||||
# volume spike is actually a step-change, so
|
||||
# probably missing stock split
|
||||
continue
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
# from io import StringIO
|
||||
|
||||
import pandas as pd
|
||||
import requests
|
||||
|
||||
@@ -8,7 +6,7 @@ from yfinance.data import YfData
|
||||
from yfinance.const import _BASE_URL_
|
||||
from yfinance.exceptions import YFDataException
|
||||
|
||||
_QUOTE_SUMMARY_URL_ = f"{_BASE_URL_}/v10/finance/quoteSummary/"
|
||||
_QUOTE_SUMMARY_URL_ = f"{_BASE_URL_}/v10/finance/quoteSummary"
|
||||
|
||||
|
||||
class Holders:
|
||||
@@ -31,42 +29,36 @@ class Holders:
|
||||
@property
|
||||
def major(self) -> pd.DataFrame:
|
||||
if self._major is None:
|
||||
# self._scrape(self.proxy)
|
||||
self._fetch_and_parse()
|
||||
return self._major
|
||||
|
||||
@property
|
||||
def institutional(self) -> pd.DataFrame:
|
||||
if self._institutional is None:
|
||||
# self._scrape(self.proxy)
|
||||
self._fetch_and_parse()
|
||||
return self._institutional
|
||||
|
||||
@property
|
||||
def mutualfund(self) -> pd.DataFrame:
|
||||
if self._mutualfund is None:
|
||||
# self._scrape(self.proxy)
|
||||
self._fetch_and_parse()
|
||||
return self._mutualfund
|
||||
|
||||
@property
|
||||
def insider_transactions(self) -> pd.DataFrame:
|
||||
if self._insider_transactions is None:
|
||||
# self._scrape_insider_transactions(self.proxy)
|
||||
self._fetch_and_parse()
|
||||
return self._insider_transactions
|
||||
|
||||
@property
|
||||
def insider_purchases(self) -> pd.DataFrame:
|
||||
if self._insider_purchases is None:
|
||||
# self._scrape_insider_transactions(self.proxy)
|
||||
self._fetch_and_parse()
|
||||
return self._insider_purchases
|
||||
|
||||
@property
|
||||
def insider_roster(self) -> pd.DataFrame:
|
||||
if self._insider_roster is None:
|
||||
# self._scrape_insider_ros(self.proxy)
|
||||
self._fetch_and_parse()
|
||||
return self._insider_roster
|
||||
|
||||
@@ -187,8 +179,10 @@ class Holders:
|
||||
del owner["maxAge"]
|
||||
df = pd.DataFrame(holders)
|
||||
if not df.empty:
|
||||
df["positionDirectDate"] = pd.to_datetime(df["positionDirectDate"], unit="s")
|
||||
df["latestTransDate"] = pd.to_datetime(df["latestTransDate"], unit="s")
|
||||
if "positionDirectDate" in df:
|
||||
df["positionDirectDate"] = pd.to_datetime(df["positionDirectDate"], unit="s")
|
||||
if "latestTransDate" in df:
|
||||
df["latestTransDate"] = pd.to_datetime(df["latestTransDate"], unit="s")
|
||||
|
||||
df.rename(columns={
|
||||
"name": "Name",
|
||||
|
||||
@@ -7,7 +7,7 @@ import requests
|
||||
|
||||
from yfinance import utils
|
||||
from yfinance.data import YfData
|
||||
from yfinance.const import quote_summary_valid_modules, _BASE_URL_
|
||||
from yfinance.const import quote_summary_valid_modules, _BASE_URL_, _QUERY1_URL_
|
||||
from yfinance.exceptions import YFDataException, YFException
|
||||
|
||||
info_retired_keys_price = {"currentPrice", "dayHigh", "dayLow", "open", "previousClose", "volume", "volume24Hr"}
|
||||
@@ -590,33 +590,56 @@ class Quote:
|
||||
return None
|
||||
return result
|
||||
|
||||
def _fetch_additional_info(self, proxy):
|
||||
params_dict = {"symbols": self._symbol, "formatted": "false"}
|
||||
try:
|
||||
result = self._data.get_raw_json(f"{_QUERY1_URL_}/v7/finance/quote?",
|
||||
user_agent_headers=self._data.user_agent_headers,
|
||||
params=params_dict, proxy=proxy)
|
||||
except requests.exceptions.HTTPError as e:
|
||||
utils.get_yf_logger().error(str(e))
|
||||
return None
|
||||
return result
|
||||
|
||||
def _fetch_info(self, proxy):
|
||||
if self._already_fetched:
|
||||
return
|
||||
self._already_fetched = True
|
||||
modules = ['financialData', 'quoteType', 'defaultKeyStatistics', 'assetProfile', 'summaryDetail']
|
||||
result = self._fetch(proxy, modules=modules)
|
||||
result.update(self._fetch_additional_info(proxy))
|
||||
if result is None:
|
||||
self._info = {}
|
||||
return
|
||||
|
||||
result["quoteSummary"]["result"][0]["symbol"] = self._symbol
|
||||
query1_info = next(
|
||||
(info for info in result.get("quoteSummary", {}).get("result", []) if info["symbol"] == self._symbol),
|
||||
None,
|
||||
)
|
||||
# Most keys that appear in multiple dicts have same value. Except 'maxAge' because
|
||||
# Yahoo not consistent with days vs seconds. Fix it here:
|
||||
for k in query1_info:
|
||||
if "maxAge" in query1_info[k] and query1_info[k]["maxAge"] == 1:
|
||||
query1_info[k]["maxAge"] = 86400
|
||||
query1_info = {
|
||||
k1: v1
|
||||
for k, v in query1_info.items()
|
||||
if isinstance(v, dict)
|
||||
for k1, v1 in v.items()
|
||||
if v1
|
||||
}
|
||||
query1_info = {}
|
||||
for quote in ["quoteSummary", "quoteResponse"]:
|
||||
if quote in result:
|
||||
result[quote]["result"][0]["symbol"] = self._symbol
|
||||
query_info = next(
|
||||
(info for info in result.get(quote, {}).get("result", [])
|
||||
if info["symbol"] == self._symbol),
|
||||
None,
|
||||
)
|
||||
if query_info:
|
||||
query1_info.update(query_info)
|
||||
|
||||
# Normalize and flatten nested dictionaries while converting maxAge from days (1) to seconds (86400).
|
||||
# This handles Yahoo Finance API inconsistency where maxAge is sometimes expressed in days instead of seconds.
|
||||
processed_info = {}
|
||||
for k, v in query1_info.items():
|
||||
|
||||
# Handle nested dictionary
|
||||
if isinstance(v, dict):
|
||||
for k1, v1 in v.items():
|
||||
if v1 is not None:
|
||||
processed_info[k1] = 86400 if k1 == "maxAge" and v1 == 1 else v1
|
||||
|
||||
elif v is not None:
|
||||
processed_info[k] = v
|
||||
|
||||
query1_info = processed_info
|
||||
|
||||
# recursively format but only because of 'companyOfficers'
|
||||
|
||||
def _format(k, v):
|
||||
@@ -631,9 +654,8 @@ class Quote:
|
||||
else:
|
||||
v2 = v
|
||||
return v2
|
||||
for k, v in query1_info.items():
|
||||
query1_info[k] = _format(k, v)
|
||||
self._info = query1_info
|
||||
|
||||
self._info = {k: _format(k, v) for k, v in query1_info.items()}
|
||||
|
||||
def _fetch_complementary(self, proxy):
|
||||
if self._already_fetched_complementary:
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
from .screener import Screener
|
||||
from .screener_query import EquityQuery
|
||||
from .query import EquityQuery
|
||||
from .screener import screen, PREDEFINED_SCREENER_QUERIES
|
||||
|
||||
__all__ = ['EquityQuery', 'Screener']
|
||||
__all__ = ['EquityQuery', 'FundQuery', 'screen', 'PREDEFINED_SCREENER_QUERIES']
|
||||
|
||||
218
yfinance/screener/query.py
Normal file
218
yfinance/screener/query.py
Normal file
@@ -0,0 +1,218 @@
|
||||
from abc import ABC, abstractmethod
|
||||
import numbers
|
||||
from typing import List, Union, Dict, TypeVar, Tuple
|
||||
|
||||
from yfinance.const import EQUITY_SCREENER_EQ_MAP, EQUITY_SCREENER_FIELDS
|
||||
from yfinance.const import FUND_SCREENER_EQ_MAP, FUND_SCREENER_FIELDS
|
||||
from yfinance.exceptions import YFNotImplementedError
|
||||
from ..utils import dynamic_docstring, generate_list_table_from_dict_universal
|
||||
|
||||
T = TypeVar('T', bound=Union[str, numbers.Real])
|
||||
|
||||
class QueryBase(ABC):
|
||||
def __init__(self, operator: str, operand: Union[ List['QueryBase'], Tuple[str, Tuple[Union[str, numbers.Real], ...]] ]):
|
||||
operator = operator.upper()
|
||||
|
||||
if not isinstance(operand, list):
|
||||
raise TypeError('Invalid operand type')
|
||||
if len(operand) <= 0:
|
||||
raise ValueError('Invalid field for EquityQuery')
|
||||
|
||||
if operator == 'IS-IN':
|
||||
self._validate_isin_operand(operand)
|
||||
elif operator in {'OR','AND'}:
|
||||
self._validate_or_and_operand(operand)
|
||||
elif operator == 'EQ':
|
||||
self._validate_eq_operand(operand)
|
||||
elif operator == 'BTWN':
|
||||
self._validate_btwn_operand(operand)
|
||||
elif operator in {'GT','LT','GTE','LTE'}:
|
||||
self._validate_gt_lt(operand)
|
||||
else:
|
||||
raise ValueError('Invalid Operator Value')
|
||||
|
||||
self.operator = operator
|
||||
self.operands = operand
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def valid_fields(self) -> List:
|
||||
raise YFNotImplementedError('valid_fields() needs to be implemented by child')
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def valid_values(self) -> Dict:
|
||||
raise YFNotImplementedError('valid_values() needs to be implemented by child')
|
||||
|
||||
def _validate_or_and_operand(self, operand: List['QueryBase']) -> None:
|
||||
if len(operand) <= 1:
|
||||
raise ValueError('Operand must be length longer than 1')
|
||||
if all(isinstance(e, QueryBase) for e in operand) is False:
|
||||
raise TypeError(f'Operand must be type {type(self)} for OR/AND')
|
||||
|
||||
def _validate_eq_operand(self, operand: List[Union[str, numbers.Real]]) -> None:
|
||||
if len(operand) != 2:
|
||||
raise ValueError('Operand must be length 2 for EQ')
|
||||
|
||||
if not any(operand[0] in fields_by_type for fields_by_type in self.valid_fields.values()):
|
||||
raise ValueError(f'Invalid field for {type(self)} "{operand[0]}"')
|
||||
if operand[0] in self.valid_values:
|
||||
vv = self.valid_values[operand[0]]
|
||||
if isinstance(vv, dict):
|
||||
# this data structure is slightly different to generate better docs,
|
||||
# need to unpack here.
|
||||
vv = set().union(*[e for e in vv.values()])
|
||||
if operand[1] not in vv:
|
||||
raise ValueError(f'Invalid EQ value "{operand[1]}"')
|
||||
|
||||
def _validate_btwn_operand(self, operand: List[Union[str, numbers.Real]]) -> None:
|
||||
if len(operand) != 3:
|
||||
raise ValueError('Operand must be length 3 for BTWN')
|
||||
if not any(operand[0] in fields_by_type for fields_by_type in self.valid_fields.values()):
|
||||
raise ValueError(f'Invalid field for {type(self)}')
|
||||
if isinstance(operand[1], numbers.Real) is False:
|
||||
raise TypeError('Invalid comparison type for BTWN')
|
||||
if isinstance(operand[2], numbers.Real) is False:
|
||||
raise TypeError('Invalid comparison type for BTWN')
|
||||
|
||||
def _validate_gt_lt(self, operand: List[Union[str, numbers.Real]]) -> None:
|
||||
if len(operand) != 2:
|
||||
raise ValueError('Operand must be length 2 for GT/LT')
|
||||
if not any(operand[0] in fields_by_type for fields_by_type in self.valid_fields.values()):
|
||||
raise ValueError(f'Invalid field for {type(self)} "{operand[0]}"')
|
||||
if isinstance(operand[1], numbers.Real) is False:
|
||||
raise TypeError('Invalid comparison type for GT/LT')
|
||||
|
||||
def _validate_isin_operand(self, operand: List['QueryBase']) -> None:
|
||||
if len(operand) < 2:
|
||||
raise ValueError('Operand must be length 2+ for IS-IN')
|
||||
|
||||
if not any(operand[0] in fields_by_type for fields_by_type in self.valid_fields.values()):
|
||||
raise ValueError(f'Invalid field for {type(self)} "{operand[0]}"')
|
||||
if operand[0] in self.valid_values:
|
||||
vv = self.valid_values[operand[0]]
|
||||
if isinstance(vv, dict):
|
||||
# this data structure is slightly different to generate better docs,
|
||||
# need to unpack here.
|
||||
vv = set().union(*[e for e in vv.values()])
|
||||
for i in range(1, len(operand)):
|
||||
if operand[i] not in vv:
|
||||
raise ValueError(f'Invalid EQ value "{operand[i]}"')
|
||||
|
||||
def to_dict(self) -> Dict:
|
||||
op = self.operator
|
||||
ops = self.operands
|
||||
if self.operator == 'IS-IN':
|
||||
# Expand to OR of EQ queries
|
||||
op = 'OR'
|
||||
ops = [type(self)('EQ', [self.operands[0], v]) for v in self.operands[1:]]
|
||||
return {
|
||||
"operator": op,
|
||||
"operands": [o.to_dict() if isinstance(o, QueryBase) else o for o in ops]
|
||||
}
|
||||
|
||||
def __repr__(self, indent=0) -> str:
|
||||
indent_str = " " * indent
|
||||
class_name = self.__class__.__name__
|
||||
|
||||
if isinstance(self.operands, list):
|
||||
# For list operands, check if they contain any QueryBase objects
|
||||
if any(isinstance(op, QueryBase) for op in self.operands):
|
||||
# If there are nested queries, format them with newlines
|
||||
operands_str = ",\n".join(
|
||||
f"{indent_str} {op.__repr__(indent + 1) if isinstance(op, QueryBase) else repr(op)}"
|
||||
for op in self.operands
|
||||
)
|
||||
return f"{class_name}({self.operator}, [\n{operands_str}\n{indent_str}])"
|
||||
else:
|
||||
# For lists of simple types, keep them on one line
|
||||
return f"{class_name}({self.operator}, {repr(self.operands)})"
|
||||
else:
|
||||
# Handle single operand
|
||||
return f"{class_name}({self.operator}, {repr(self.operands)})"
|
||||
|
||||
def __str__(self) -> str:
|
||||
return self.__repr__()
|
||||
|
||||
|
||||
class EquityQuery(QueryBase):
|
||||
"""
|
||||
The `EquityQuery` class constructs filters for stocks based on specific criteria such as region, sector, exchange, and peer group.
|
||||
|
||||
Start with value operations: `EQ` (equals), `IS-IN` (is in), `BTWN` (between), `GT` (greater than), `LT` (less than), `GTE` (greater or equal), `LTE` (less or equal).
|
||||
|
||||
Combine them with logical operations: `AND`, `OR`.
|
||||
|
||||
Example:
|
||||
Predefined Yahoo query `aggressive_small_caps`:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from yfinance import EquityQuery
|
||||
|
||||
EquityQuery('and', [
|
||||
EquityQuery('is-in', ['exchange', 'NMS', 'NYQ']),
|
||||
EquityQuery('lt', ["epsgrowth.lasttwelvemonths", 15])
|
||||
])
|
||||
"""
|
||||
|
||||
@dynamic_docstring({"valid_operand_fields_table": generate_list_table_from_dict_universal(EQUITY_SCREENER_FIELDS)})
|
||||
@property
|
||||
def valid_fields(self) -> Dict:
|
||||
"""
|
||||
Valid operands, grouped by category.
|
||||
{valid_operand_fields_table}
|
||||
"""
|
||||
return EQUITY_SCREENER_FIELDS
|
||||
|
||||
@dynamic_docstring({"valid_values_table": generate_list_table_from_dict_universal(EQUITY_SCREENER_EQ_MAP, concat_keys=['exchange'])})
|
||||
@property
|
||||
def valid_values(self) -> Dict:
|
||||
"""
|
||||
Most operands take number values, but some have a restricted set of valid values.
|
||||
{valid_values_table}
|
||||
"""
|
||||
return EQUITY_SCREENER_EQ_MAP
|
||||
|
||||
|
||||
class FundQuery(QueryBase):
|
||||
"""
|
||||
The `FundQuery` class constructs filters for mutual funds based on specific criteria such as region, sector, exchange, and peer group.
|
||||
|
||||
Start with value operations: `EQ` (equals), `IS-IN` (is in), `BTWN` (between), `GT` (greater than), `LT` (less than), `GTE` (greater or equal), `LTE` (less or equal).
|
||||
|
||||
Combine them with logical operations: `AND`, `OR`.
|
||||
|
||||
Example:
|
||||
Predefined Yahoo query `solid_large_growth_funds`:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from yfinance import FundQuery
|
||||
|
||||
FundQuery('and', [
|
||||
FundQuery('eq', ['categoryname', 'Large Growth']),
|
||||
FundQuery('is-in', ['performanceratingoverall', 4, 5]),
|
||||
FundQuery('lt', ['initialinvestment', 100001]),
|
||||
FundQuery('lt', ['annualreturnnavy1categoryrank', 50]),
|
||||
FundQuery('eq', ['exchange', 'NAS'])
|
||||
])
|
||||
"""
|
||||
@dynamic_docstring({"valid_operand_fields_table": generate_list_table_from_dict_universal(FUND_SCREENER_FIELDS)})
|
||||
@property
|
||||
def valid_fields(self) -> Dict:
|
||||
"""
|
||||
Valid operands, grouped by category.
|
||||
{valid_operand_fields_table}
|
||||
"""
|
||||
return FUND_SCREENER_FIELDS
|
||||
|
||||
@dynamic_docstring({"valid_values_table": generate_list_table_from_dict_universal(FUND_SCREENER_EQ_MAP)})
|
||||
@property
|
||||
def valid_values(self) -> Dict:
|
||||
"""
|
||||
Most operands take number values, but some have a restricted set of valid values.
|
||||
{valid_values_table}
|
||||
"""
|
||||
return FUND_SCREENER_EQ_MAP
|
||||
|
||||
@@ -1,188 +1,180 @@
|
||||
from typing import Dict
|
||||
from .query import EquityQuery as EqyQy
|
||||
from .query import FundQuery as FndQy
|
||||
from .query import QueryBase, EquityQuery, FundQuery
|
||||
|
||||
from yfinance import utils
|
||||
from yfinance.const import _BASE_URL_
|
||||
from yfinance.data import YfData
|
||||
from yfinance.const import _BASE_URL_, PREDEFINED_SCREENER_BODY_MAP
|
||||
from .screener_query import Query
|
||||
from ..utils import dynamic_docstring, generate_list_table_from_dict_of_dict
|
||||
|
||||
from ..utils import dynamic_docstring, generate_list_table_from_dict_universal
|
||||
|
||||
from typing import Union
|
||||
import requests
|
||||
|
||||
_SCREENER_URL_ = f"{_BASE_URL_}/v1/finance/screener"
|
||||
_PREDEFINED_URL_ = f"{_SCREENER_URL_}/predefined/saved"
|
||||
|
||||
class Screener:
|
||||
PREDEFINED_SCREENER_BODY_DEFAULTS = {
|
||||
"offset":0, "size":25, "userId":"","userIdType":"guid"
|
||||
}
|
||||
|
||||
PREDEFINED_SCREENER_QUERIES = {
|
||||
'aggressive_small_caps': {"sortField":"eodvolume", "sortType":"desc",
|
||||
"query": EqyQy('and', [EqyQy('is-in', ['exchange', 'NMS', 'NYQ']), EqyQy('lt', ["epsgrowth.lasttwelvemonths", 15])])},
|
||||
'day_gainers': {"sortField":"percentchange", "sortType":"DESC",
|
||||
"query": EqyQy('and', [EqyQy('gt', ['percentchange', 3]), EqyQy('eq', ['region', 'us']), EqyQy('gte', ['intradaymarketcap', 2000000000]), EqyQy('gte', ['intradayprice', 5]), EqyQy('gt', ['dayvolume', 15000])])},
|
||||
'day_losers': {"sortField":"percentchange", "sortType":"ASC",
|
||||
"query": EqyQy('and', [EqyQy('lt', ['percentchange', -2.5]), EqyQy('eq', ['region', 'us']), EqyQy('gte', ['intradaymarketcap', 2000000000]), EqyQy('gte', ['intradayprice', 5]), EqyQy('gt', ['dayvolume', 20000])])},
|
||||
'growth_technology_stocks': {"sortField":"eodvolume", "sortType":"desc",
|
||||
"query": EqyQy('and', [EqyQy('gte', ['quarterlyrevenuegrowth.quarterly', 25]), EqyQy('gte', ['epsgrowth.lasttwelvemonths', 25]), EqyQy('eq', ['sector', 'Technology']), EqyQy('is-in', ['exchange', 'NMS', 'NYQ'])])},
|
||||
'most_actives': {"sortField":"dayvolume", "sortType":"DESC",
|
||||
"query": EqyQy('and', [EqyQy('eq', ['region', 'us']), EqyQy('gte', ['intradaymarketcap', 2000000000]), EqyQy('gt', ['dayvolume', 5000000])])},
|
||||
'most_shorted_stocks': {"size":25, "offset":0, "sortField":"short_percentage_of_shares_outstanding.value", "sortType":"DESC",
|
||||
"query": EqyQy('and', [EqyQy('eq', ['region', 'us']), EqyQy('gt', ['intradayprice', 1]), EqyQy('gt', ['avgdailyvol3m', 200000])])},
|
||||
'small_cap_gainers': {"sortField":"eodvolume", "sortType":"desc",
|
||||
"query": EqyQy("and", [EqyQy("lt", ["intradaymarketcap",2000000000]), EqyQy("is-in", ["exchange", "NMS", "NYQ"])])},
|
||||
'undervalued_growth_stocks': {"sortType":"DESC", "sortField":"eodvolume",
|
||||
"query": EqyQy('and', [EqyQy('btwn', ['peratio.lasttwelvemonths', 0, 20]), EqyQy('lt', ['pegratio_5y', 1]), EqyQy('gte', ['epsgrowth.lasttwelvemonths', 25]), EqyQy('is-in', ['exchange', 'NMS', 'NYQ'])])},
|
||||
'undervalued_large_caps': {"sortField":"eodvolume", "sortType":"desc",
|
||||
"query": EqyQy('and', [EqyQy('btwn', ['peratio.lasttwelvemonths', 0, 20]), EqyQy('lt', ['pegratio_5y', 1]), EqyQy('btwn', ['intradaymarketcap', 10000000000, 100000000000]), EqyQy('is-in', ['exchange', 'NMS', 'NYQ'])])},
|
||||
'conservative_foreign_funds': {"sortType":"DESC", "sortField":"fundnetassets",
|
||||
"query": FndQy('and', [FndQy('is-in', ['categoryname', 'Foreign Large Value', 'Foreign Large Blend', 'Foreign Large Growth', 'Foreign Small/Mid Growth', 'Foreign Small/Mid Blend', 'Foreign Small/Mid Value']), FndQy('is-in', ['performanceratingoverall', 4, 5]), FndQy('lt', ['initialinvestment', 100001]), FndQy('lt', ['annualreturnnavy1categoryrank', 50]), FndQy('is-in', ['riskratingoverall', 1, 2, 3]), FndQy('eq', ['exchange', 'NAS'])])},
|
||||
'high_yield_bond': {"sortType":"DESC", "sortField":"fundnetassets",
|
||||
"query": FndQy('and', [FndQy('is-in', ['performanceratingoverall', 4, 5]), FndQy('lt', ['initialinvestment', 100001]), FndQy('lt', ['annualreturnnavy1categoryrank', 50]), FndQy('is-in', ['riskratingoverall', 1, 2, 3]), FndQy('eq', ['categoryname', 'High Yield Bond']), FndQy('eq', ['exchange', 'NAS'])])},
|
||||
'portfolio_anchors': {"sortType":"DESC", "sortField":"fundnetassets",
|
||||
"query": FndQy('and', [FndQy('eq', ['categoryname', 'Large Blend']), FndQy('is-in', ['performanceratingoverall', 4, 5]), FndQy('lt', ['initialinvestment', 100001]), FndQy('lt', ['annualreturnnavy1categoryrank', 50]), FndQy('eq', ['exchange', 'NAS'])])},
|
||||
'solid_large_growth_funds': {"sortType":"DESC", "sortField":"fundnetassets",
|
||||
"query": FndQy('and', [FndQy('eq', ['categoryname', 'Large Growth']), FndQy('is-in', ['performanceratingoverall', 4, 5]), FndQy('lt', ['initialinvestment', 100001]), FndQy('lt', ['annualreturnnavy1categoryrank', 50]), FndQy('eq', ['exchange', 'NAS'])])},
|
||||
'solid_midcap_growth_funds': {"sortType":"DESC", "sortField":"fundnetassets",
|
||||
"query": FndQy('and', [FndQy('eq', ['categoryname', 'Mid-Cap Growth']), FndQy('is-in', ['performanceratingoverall', 4, 5]), FndQy('lt', ['initialinvestment', 100001]), FndQy('lt', ['annualreturnnavy1categoryrank', 50]), FndQy('eq', ['exchange', 'NAS'])])},
|
||||
'top_mutual_funds': {"sortType":"DESC", "sortField":"percentchange",
|
||||
"query": FndQy('and', [FndQy('gt', ['intradayprice', 15]), FndQy('is-in', ['performanceratingoverall', 4, 5]), FndQy('gt', ['initialinvestment', 1000]), FndQy('eq', ['exchange', 'NAS'])])}
|
||||
}
|
||||
|
||||
@dynamic_docstring({"predefined_screeners": generate_list_table_from_dict_universal(PREDEFINED_SCREENER_QUERIES, bullets=True, title='Predefined queries (Dec-2024)')})
|
||||
def screen(query: Union[str, EquityQuery, FundQuery],
|
||||
offset: int = None,
|
||||
size: int = None,
|
||||
sortField: str = None,
|
||||
sortAsc: bool = None,
|
||||
userId: str = None,
|
||||
userIdType: str = None,
|
||||
session = None, proxy = None):
|
||||
"""
|
||||
The `Screener` class is used to execute the queries and return the filtered results.
|
||||
Run a screen: predefined query, or custom query.
|
||||
|
||||
The Screener class provides methods to set and manipulate the body of a screener request,
|
||||
fetch and parse the screener results, and access predefined screener bodies.
|
||||
:Parameters:
|
||||
* Defaults only apply if query = EquityQuery or FundQuery
|
||||
query : str | Query:
|
||||
The query to execute, either name of predefined or custom query.
|
||||
For predefined list run yf.PREDEFINED_SCREENER_QUERIES.keys()
|
||||
offset : int
|
||||
The offset for the results. Default 0.
|
||||
size : int
|
||||
number of results to return. Default 100, maximum 250 (Yahoo)
|
||||
sortField : str
|
||||
field to sort by. Default "ticker"
|
||||
sortAsc : bool
|
||||
Sort ascending? Default False
|
||||
userId : str
|
||||
The user ID. Default empty.
|
||||
userIdType : str
|
||||
Type of user ID (e.g., "guid"). Default "guid".
|
||||
|
||||
Example: predefined query
|
||||
.. code-block:: python
|
||||
|
||||
import yfinance as yf
|
||||
response = yf.screen("aggressive_small_caps")
|
||||
|
||||
Example: custom query
|
||||
.. code-block:: python
|
||||
|
||||
import yfinance as yf
|
||||
from yfinance import EquityQuery
|
||||
q = EquityQuery('and', [
|
||||
EquityQuery('gt', ['percentchange', 3]),
|
||||
EquityQuery('eq', ['region', 'us'])
|
||||
])
|
||||
response = yf.screen(q, sortField = 'percentchange', sortAsc = True)
|
||||
|
||||
To access predefineds query code
|
||||
.. code-block:: python
|
||||
|
||||
import yfinance as yf
|
||||
query = yf.PREDEFINED_SCREENER_QUERIES['aggressive_small_caps']
|
||||
|
||||
{predefined_screeners}
|
||||
"""
|
||||
def __init__(self, session=None, proxy=None):
|
||||
"""
|
||||
Args:
|
||||
session (requests.Session, optional): A requests session object to be used for making HTTP requests. Defaults to None.
|
||||
proxy (str, optional): A proxy URL to be used for making HTTP requests. Defaults to None.
|
||||
|
||||
.. seealso::
|
||||
|
||||
:attr:`Screener.predefined_bodies <yfinance.Screener.predefined_bodies>`
|
||||
supported predefined screens
|
||||
"""
|
||||
self.proxy = proxy
|
||||
self.session = session
|
||||
# Only use defaults when user NOT give a predefined, because
|
||||
# Yahoo's predefined endpoint auto-applies defaults. Also,
|
||||
# that endpoint might be ignoring these fields.
|
||||
defaults = {
|
||||
'offset': 0,
|
||||
'size': 25,
|
||||
'sortField': 'ticker',
|
||||
'sortAsc': False,
|
||||
'userId': "",
|
||||
'userIdType': "guid"
|
||||
}
|
||||
|
||||
self._data: YfData = YfData(session=session)
|
||||
self._body: Dict = {}
|
||||
self._response: Dict = {}
|
||||
self._body_updated = False
|
||||
self._accepted_body_keys = {"offset","size","sortField","sortType","quoteType","query","userId","userIdType"}
|
||||
self._predefined_bodies = PREDEFINED_SCREENER_BODY_MAP.keys()
|
||||
if size is not None and size > 250:
|
||||
raise ValueError("Yahoo limits query size to 250, reduce size.")
|
||||
|
||||
@property
|
||||
def body(self) -> Dict:
|
||||
return self._body
|
||||
|
||||
@property
|
||||
def response(self) -> Dict:
|
||||
"""
|
||||
Fetch screen result
|
||||
fields = dict(locals())
|
||||
for k in ['query', 'session', 'proxy']:
|
||||
if k in fields:
|
||||
del fields[k]
|
||||
|
||||
Example:
|
||||
params_dict = {"corsDomain": "finance.yahoo.com", "formatted": "false", "lang": "en-US", "region": "US"}
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
result = screener.response
|
||||
symbols = [quote['symbol'] for quote in result['quotes']]
|
||||
"""
|
||||
if self._body_updated or self._response is None:
|
||||
self._fetch_and_parse()
|
||||
|
||||
self._body_updated = False
|
||||
return self._response
|
||||
|
||||
@dynamic_docstring({"predefined_screeners": generate_list_table_from_dict_of_dict(PREDEFINED_SCREENER_BODY_MAP,bullets=False)})
|
||||
@property
|
||||
def predefined_bodies(self) -> Dict:
|
||||
"""
|
||||
Predefined Screeners
|
||||
{predefined_screeners}
|
||||
"""
|
||||
return self._predefined_bodies
|
||||
|
||||
def set_default_body(self, query: Query, offset: int = 0, size: int = 100, sortField: str = "ticker", sortType: str = "desc", quoteType: str = "equity", userId: str = "", userIdType: str = "guid") -> None:
|
||||
"""
|
||||
Set the default body using a custom query
|
||||
|
||||
Example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
screener.set_default_body(qf)
|
||||
"""
|
||||
self._body_updated = True
|
||||
|
||||
self._body = {
|
||||
"offset": offset,
|
||||
"size": size,
|
||||
"sortField": sortField,
|
||||
"sortType": sortType,
|
||||
"quoteType": quoteType,
|
||||
"query": query.to_dict(),
|
||||
"userId": userId,
|
||||
"userIdType": userIdType
|
||||
}
|
||||
|
||||
def set_predefined_body(self, k: str) -> None:
|
||||
"""
|
||||
Set a predefined body
|
||||
|
||||
Example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
screener.set_predefined_body('day_gainers')
|
||||
|
||||
|
||||
.. seealso::
|
||||
|
||||
:attr:`Screener.predefined_bodies <yfinance.Screener.predefined_bodies>`
|
||||
supported predefined screens
|
||||
"""
|
||||
body = PREDEFINED_SCREENER_BODY_MAP.get(k, None)
|
||||
if not body:
|
||||
raise ValueError(f'Invalid key {k} provided for predefined screener')
|
||||
|
||||
self._body_updated = True
|
||||
self._body = body
|
||||
|
||||
def set_body(self, body: Dict) -> None:
|
||||
"""
|
||||
Set the fully custom body
|
||||
|
||||
Example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
screener.set_body({
|
||||
"offset": 0,
|
||||
"size": 100,
|
||||
"sortField": "ticker",
|
||||
"sortType": "desc",
|
||||
"quoteType": "equity",
|
||||
"query": qf.to_dict(),
|
||||
"userId": "",
|
||||
"userIdType": "guid"
|
||||
})
|
||||
"""
|
||||
missing_keys = [key for key in self._accepted_body_keys if key not in body]
|
||||
if missing_keys:
|
||||
raise ValueError(f"Missing required keys in body: {missing_keys}")
|
||||
|
||||
extra_keys = [key for key in body if key not in self._accepted_body_keys]
|
||||
if extra_keys:
|
||||
raise ValueError(f"Body contains extra keys: {extra_keys}")
|
||||
|
||||
self._body_updated = True
|
||||
self._body = body
|
||||
|
||||
|
||||
def patch_body(self, values: Dict) -> None:
|
||||
"""
|
||||
Patch parts of the body
|
||||
|
||||
Example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
screener.patch_body({"offset": 100})
|
||||
"""
|
||||
extra_keys = [key for key in values if key not in self._accepted_body_keys]
|
||||
if extra_keys:
|
||||
raise ValueError(f"Body contains extra keys: {extra_keys}")
|
||||
|
||||
self._body_updated = True
|
||||
for k in values:
|
||||
self._body[k] = values[k]
|
||||
|
||||
def _validate_body(self) -> None:
|
||||
if not all(k in self._body for k in self._accepted_body_keys):
|
||||
raise ValueError("Missing required keys in body")
|
||||
|
||||
def _fetch(self) -> Dict:
|
||||
params_dict = {"corsDomain": "finance.yahoo.com", "formatted": "false", "lang": "en-US", "region": "US"}
|
||||
response = self._data.post(_SCREENER_URL_, body=self.body, user_agent_headers=self._data.user_agent_headers, params=params_dict, proxy=self.proxy)
|
||||
response.raise_for_status()
|
||||
return response.json()
|
||||
|
||||
def _fetch_and_parse(self) -> None:
|
||||
response = None
|
||||
self._validate_body()
|
||||
|
||||
post_query = None
|
||||
if isinstance(query, str):
|
||||
# post_query = PREDEFINED_SCREENER_QUERIES[query]
|
||||
# Switch to Yahoo's predefined endpoint
|
||||
_data = YfData(session=session)
|
||||
params_dict['scrIds'] = query
|
||||
for k,v in fields.items():
|
||||
if v is not None:
|
||||
params_dict[k] = v
|
||||
resp = _data.get(url=_PREDEFINED_URL_, params=params_dict, proxy=proxy)
|
||||
try:
|
||||
response = self._fetch()
|
||||
self._response = response['finance']['result'][0]
|
||||
except Exception as e:
|
||||
logger = utils.get_yf_logger()
|
||||
logger.error(f"Failed to get screener data for '{self._body.get('query', 'query not set')}' reason: {e}")
|
||||
logger.debug("Got response: ")
|
||||
logger.debug("-------------")
|
||||
logger.debug(f" {response}")
|
||||
logger.debug("-------------")
|
||||
resp.raise_for_status()
|
||||
except requests.exceptions.HTTPError:
|
||||
if query not in PREDEFINED_SCREENER_QUERIES:
|
||||
print(f"yfinance.screen: '{query}' is probably not a predefined query.")
|
||||
raise
|
||||
return resp.json()["finance"]["result"][0]
|
||||
|
||||
elif isinstance(query, QueryBase):
|
||||
# Prepare other fields
|
||||
for k in defaults:
|
||||
if k not in fields or fields[k] is None:
|
||||
fields[k] = defaults[k]
|
||||
fields['sortType'] = 'ASC' if fields['sortAsc'] else 'DESC'
|
||||
del fields['sortAsc']
|
||||
|
||||
post_query = fields
|
||||
post_query['query'] = query
|
||||
|
||||
else:
|
||||
raise ValueError(f'Query must be type str or QueryBase, not "{type(query)}"')
|
||||
|
||||
if query is None:
|
||||
raise ValueError('No query provided')
|
||||
|
||||
if isinstance(post_query['query'], EqyQy):
|
||||
post_query['quoteType'] = 'EQUITY'
|
||||
elif isinstance(post_query['query'], FndQy):
|
||||
post_query['quoteType'] = 'MUTUALFUND'
|
||||
post_query['query'] = post_query['query'].to_dict()
|
||||
|
||||
# Fetch
|
||||
_data = YfData(session=session)
|
||||
response = _data.post(_SCREENER_URL_,
|
||||
body=post_query,
|
||||
user_agent_headers=_data.user_agent_headers,
|
||||
params=params_dict,
|
||||
proxy=proxy)
|
||||
response.raise_for_status()
|
||||
return response.json()['finance']['result'][0]
|
||||
|
||||
@@ -1,145 +0,0 @@
|
||||
from abc import ABC, abstractmethod
|
||||
import numbers
|
||||
from typing import List, Union, Dict
|
||||
|
||||
from yfinance.const import EQUITY_SCREENER_EQ_MAP, EQUITY_SCREENER_FIELDS
|
||||
from yfinance.exceptions import YFNotImplementedError
|
||||
from ..utils import dynamic_docstring, generate_list_table_from_dict
|
||||
|
||||
class Query(ABC):
|
||||
def __init__(self, operator: str, operand: Union[numbers.Real, str, List['Query']]):
|
||||
self.operator = operator
|
||||
self.operands = operand
|
||||
|
||||
@abstractmethod
|
||||
def to_dict(self) -> Dict:
|
||||
raise YFNotImplementedError('to_dict() needs to be implemented by children classes')
|
||||
|
||||
class EquityQuery(Query):
|
||||
"""
|
||||
The `EquityQuery` class constructs filters for stocks based on specific criteria such as region, sector, exchange, and peer group.
|
||||
|
||||
The queries support operators: `GT` (greater than), `LT` (less than), `BTWN` (between), `EQ` (equals), and logical operators `AND` and `OR` for combining multiple conditions.
|
||||
|
||||
Example:
|
||||
Screen for stocks where the end-of-day price is greater than 3.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
gt = yf.EquityQuery('gt', ['eodprice', 3])
|
||||
|
||||
Screen for stocks where the average daily volume over the last 3 months is less than a very large number.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
lt = yf.EquityQuery('lt', ['avgdailyvol3m', 99999999999])
|
||||
|
||||
Screen for stocks where the intraday market cap is between 0 and 100 million.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
btwn = yf.EquityQuery('btwn', ['intradaymarketcap', 0, 100000000])
|
||||
|
||||
Screen for stocks in the Technology sector.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
eq = yf.EquityQuery('eq', ['sector', 'Technology'])
|
||||
|
||||
Combine queries using AND/OR.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
qt = yf.EquityQuery('and', [gt, lt])
|
||||
qf = yf.EquityQuery('or', [qt, btwn, eq])
|
||||
"""
|
||||
def __init__(self, operator: str, operand: Union[numbers.Real, str, List['EquityQuery']]):
|
||||
"""
|
||||
.. seealso::
|
||||
|
||||
:attr:`EquityQuery.valid_operand_fields <yfinance.EquityQuery.valid_operand_fields>`
|
||||
supported operand values for query
|
||||
:attr:`EquityQuery.valid_eq_operand_map <yfinance.EquityQuery.valid_eq_operand_map>`
|
||||
supported `EQ query operand parameters`
|
||||
"""
|
||||
operator = operator.upper()
|
||||
|
||||
if not isinstance(operand, list):
|
||||
raise TypeError('Invalid operand type')
|
||||
if len(operand) <= 0:
|
||||
raise ValueError('Invalid field for Screener')
|
||||
|
||||
if operator in {'OR','AND'}:
|
||||
self._validate_or_and_operand(operand)
|
||||
elif operator == 'EQ':
|
||||
self._validate_eq_operand(operand)
|
||||
elif operator == 'BTWN':
|
||||
self._validate_btwn_operand(operand)
|
||||
elif operator in {'GT','LT'}:
|
||||
self._validate_gt_lt(operand)
|
||||
else:
|
||||
raise ValueError('Invalid Operator Value')
|
||||
|
||||
self.operator = operator
|
||||
self.operands = operand
|
||||
self._valid_eq_operand_map = EQUITY_SCREENER_EQ_MAP
|
||||
self._valid_operand_fields = EQUITY_SCREENER_FIELDS
|
||||
|
||||
@dynamic_docstring({"valid_eq_operand_map_table": generate_list_table_from_dict(EQUITY_SCREENER_EQ_MAP)})
|
||||
@property
|
||||
def valid_eq_operand_map(self) -> Dict:
|
||||
"""
|
||||
Valid Operand Map for Operator "EQ"
|
||||
{valid_eq_operand_map_table}
|
||||
"""
|
||||
return self._valid_eq_operand_map
|
||||
|
||||
@dynamic_docstring({"valid_operand_fields_table": generate_list_table_from_dict(EQUITY_SCREENER_FIELDS)})
@property
def valid_operand_fields(self) -> Dict:
    """
    Valid Operand Fields

    Lists the field names (grouped by category) that may appear as the first
    operand of a query. The table below is substituted in at import time by
    ``dynamic_docstring``.

    {valid_operand_fields_table}
    """
    return self._valid_operand_fields
|
||||
|
||||
def _validate_or_and_operand(self, operand: List['EquityQuery']) -> None:
    """Check that an OR/AND operand list holds at least two EquityQuery nodes."""
    if len(operand) <= 1:
        raise ValueError('Operand must be length longer than 1')
    if not all(isinstance(item, EquityQuery) for item in operand):
        raise TypeError('Operand must be type EquityQuery for OR/AND')
|
||||
|
||||
def _validate_eq_operand(self, operand: List[Union[str, numbers.Real]]) -> None:
    """Check an EQ operand: [field, value] with a known field and a permitted value."""
    if len(operand) != 2:
        raise ValueError('Operand must be length 2 for EQ')

    field, value = operand
    field_known = any(field in group for group in EQUITY_SCREENER_FIELDS.values())
    if not field_known:
        raise ValueError('Invalid field for Screener')
    if field not in EQUITY_SCREENER_EQ_MAP:
        raise ValueError('Invalid EQ key')
    if value not in EQUITY_SCREENER_EQ_MAP[field]:
        raise ValueError('Invalid EQ value')
|
||||
|
||||
def _validate_btwn_operand(self, operand: List[Union[str, numbers.Real]]) -> None:
    """Check a BTWN operand: [field, low, high] with a known field and numeric bounds."""
    if len(operand) != 3:
        raise ValueError('Operand must be length 3 for BTWN')

    field, low, high = operand
    if not any(field in group for group in EQUITY_SCREENER_FIELDS.values()):
        raise ValueError('Invalid field for Screener')
    # Both bounds must be real numbers.
    for bound in (low, high):
        if not isinstance(bound, numbers.Real):
            raise TypeError('Invalid comparison type for BTWN')
|
||||
|
||||
def _validate_gt_lt(self, operand: List[Union[str, numbers.Real]]) -> None:
    """Check a GT/LT operand: [field, number] with a known field."""
    if len(operand) != 2:
        raise ValueError('Operand must be length 2 for GT/LT')

    field, threshold = operand
    if not any(field in group for group in EQUITY_SCREENER_FIELDS.values()):
        raise ValueError('Invalid field for Screener')
    if not isinstance(threshold, numbers.Real):
        raise TypeError('Invalid comparison type for GT/LT')
|
||||
|
||||
def to_dict(self) -> Dict:
    """Serialize this query tree into the nested dict payload the screener endpoint expects."""
    serialized_operands = []
    for op in self.operands:
        # Nested queries recurse; leaf values (fields/numbers/strings) pass through.
        serialized_operands.append(op.to_dict() if isinstance(op, EquityQuery) else op)
    return {
        "operator": self.operator,
        "operands": serialized_operands,
    }
|
||||
158
yfinance/search.py
Normal file
158
yfinance/search.py
Normal file
@@ -0,0 +1,158 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# yfinance - market data downloader
|
||||
# https://github.com/ranaroussi/yfinance
|
||||
#
|
||||
# Copyright 2017-2019 Ran Aroussi
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import json as _json
|
||||
|
||||
from . import utils
|
||||
from .const import _BASE_URL_
|
||||
from .data import YfData
|
||||
|
||||
|
||||
class Search:
    """Fetch and organize Yahoo Finance search results: stock quotes, news, lists, research reports and nav links."""

    def __init__(self, query, max_results=8, news_count=8, lists_count=8, include_cb=True, include_nav_links=False,
                 include_research=False, include_cultural_assets=False, enable_fuzzy_query=False, recommended=8,
                 session=None, proxy=None, timeout=30, raise_errors=True):
        """
        Fetches and organizes search results from Yahoo Finance, including stock quotes and news articles.

        The HTTP request is performed immediately at construction time (see the
        ``self.search()`` call at the end of this method).

        Args:
            query: The search query (ticker symbol or company name).
            max_results: Maximum number of stock quotes to return (default 8).
            news_count: Number of news articles to include (default 8).
            lists_count: Number of lists to include (default 8).
            include_cb: Include the company breakdown (default True).
            include_nav_links: Include the navigation links (default False).
            include_research: Include the research reports (default False).
            include_cultural_assets: Include the cultural assets (default False).
            enable_fuzzy_query: Enable fuzzy search for typos (default False).
            recommended: Recommended number of results to return (default 8).
            session: Custom HTTP session for requests (default None).
            proxy: Proxy settings for requests (default None).
            timeout: Request timeout in seconds (default 30).
            raise_errors: Raise exceptions on error (default True).
        """
        self.query = query
        self.max_results = max_results
        self.enable_fuzzy_query = enable_fuzzy_query
        self.news_count = news_count
        self.session = session
        self.proxy = proxy
        self.timeout = timeout
        # NOTE(review): raise_errors is stored but never referenced in this class —
        # errors below are raised unconditionally; confirm intended.
        self.raise_errors = raise_errors

        self.lists_count = lists_count
        self.include_cb = include_cb
        self.nav_links = include_nav_links
        self.enable_research = include_research
        self.enable_cultural_assets = include_cultural_assets
        self.recommended = recommended

        self._data = YfData(session=self.session)
        self._logger = utils.get_yf_logger()

        # Result caches; populated by search() and exposed via read-only properties.
        self._response = {}
        self._all = {}
        self._quotes = []
        self._news = []
        self._lists = []
        self._research = []
        self._nav = []

        # Perform the search immediately so results are available on the new object.
        self.search()

    def search(self) -> 'Search':
        """Search using the query parameters defined in the constructor."""
        url = f"{_BASE_URL_}/v1/finance/search"
        params = {
            "q": self.query,
            "quotesCount": self.max_results,
            "enableFuzzyQuery": self.enable_fuzzy_query,
            "newsCount": self.news_count,
            "quotesQueryId": "tss_match_phrase_query",
            "newsQueryId": "news_cie_vespa",
            "listsCount": self.lists_count,
            "enableCb": self.include_cb,
            "enableNavLinks": self.nav_links,
            "enableResearchReports": self.enable_research,
            "enableCulturalAssets": self.enable_cultural_assets,
            "recommendedCount": self.recommended
        }

        self._logger.debug(f'{self.query}: Yahoo GET parameters: {str(dict(params))}')

        data = self._data.cache_get(url=url, params=params, proxy=self.proxy, timeout=self.timeout)
        # Yahoo's maintenance page is served as HTML containing this phrase.
        if data is None or "Will be right back" in data.text:
            raise RuntimeError("*** YAHOO! FINANCE IS CURRENTLY DOWN! ***\n"
                               "Our engineers are working quickly to resolve "
                               "the issue. Thank you for your patience.")
        try:
            data = data.json()
        except _json.JSONDecodeError:
            # An unparsable body is logged and treated as an empty result set.
            self._logger.error(f"{self.query}: Failed to retrieve search results and received faulty response instead.")
            data = {}

        self._response = data
        # Filter quotes to only include symbols
        self._quotes = [quote for quote in data.get("quotes", []) if "symbol" in quote]
        self._news = data.get("news", [])
        self._lists = data.get("lists", [])
        self._research = data.get("researchReports", [])
        self._nav = data.get("nav", [])

        self._all = {"quotes": self._quotes, "news": self._news, "lists": self._lists, "research": self._research,
                     "nav": self._nav}

        return self

    @property
    def quotes(self) -> 'list':
        """Get the quotes from the search results."""
        return self._quotes

    @property
    def news(self) -> 'list':
        """Get the news from the search results."""
        return self._news

    @property
    def lists(self) -> 'list':
        """Get the lists from the search results."""
        return self._lists

    @property
    def research(self) -> 'list':
        """Get the research reports from the search results."""
        return self._research

    @property
    def nav(self) -> 'list':
        """Get the navigation links from the search results."""
        return self._nav

    @property
    def all(self) -> 'dict[str,list]':
        """Get all the results from the search results: filtered down version of response."""
        return self._all

    @property
    def response(self) -> 'dict':
        """Get the raw response from the search results."""
        return self._response
|
||||
@@ -23,6 +23,7 @@ from __future__ import print_function
|
||||
|
||||
import datetime as _datetime
|
||||
import logging
|
||||
import re
|
||||
import re as _re
|
||||
import sys as _sys
|
||||
import threading
|
||||
@@ -39,7 +40,6 @@ from dateutil.relativedelta import relativedelta
|
||||
from pytz import UnknownTimeZoneError
|
||||
|
||||
from yfinance import const
|
||||
from .const import _BASE_URL_
|
||||
|
||||
user_agent_headers = {
|
||||
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
|
||||
@@ -189,24 +189,27 @@ def is_isin(string):
|
||||
def get_all_by_isin(isin, proxy=None, session=None):
    """
    Resolve an ISIN to ticker metadata and related news via Yahoo search.

    Args:
        isin: International Securities Identification Number to resolve.
        proxy: Optional proxy settings forwarded to the search request.
        session: Optional HTTP session forwarded to the search request.

    Returns:
        dict with keys:
            - 'ticker': dict of symbol/shortname/longname/type/exchange
              (empty strings for fields missing from the first match).
            - 'news': list of news-article dicts (may be empty).

    Raises:
        ValueError: If ``isin`` is not a syntactically valid ISIN.
    """
    if not is_isin(isin):
        raise ValueError("Invalid ISIN number")

    # Deferred this to prevent circular imports
    from .search import Search

    search = Search(query=isin, max_results=1, session=session, proxy=proxy)

    # Extract the first quote and news
    ticker = search.quotes[0] if search.quotes else {}
    news = search.news

    return {
        'ticker': {
            'symbol': ticker.get('symbol', ''),
            'shortname': ticker.get('shortname', ''),
            'longname': ticker.get('longname', ''),
            'type': ticker.get('quoteType', ''),
            'exchange': ticker.get('exchDisp', ''),
        },
        'news': news
    }
|
||||
|
||||
|
||||
def get_ticker_by_isin(isin, proxy=None, session=None):
|
||||
@@ -427,26 +430,28 @@ def _parse_user_dt(dt, exchange_tz):
|
||||
|
||||
|
||||
def _interval_to_timedelta(interval):
|
||||
if interval == "1mo":
|
||||
return relativedelta(months=1)
|
||||
elif interval == "3mo":
|
||||
return relativedelta(months=3)
|
||||
elif interval == "6mo":
|
||||
return relativedelta(months=6)
|
||||
elif interval == "1y":
|
||||
return relativedelta(years=1)
|
||||
elif interval == "2y":
|
||||
return relativedelta(years=2)
|
||||
elif interval == "5y":
|
||||
return relativedelta(years=5)
|
||||
elif interval == "10y":
|
||||
return relativedelta(years=10)
|
||||
elif interval == "1wk":
|
||||
return _pd.Timedelta(days=7)
|
||||
if interval[-1] == "d":
|
||||
return relativedelta(days=int(interval[:-1]))
|
||||
elif interval[-2:] == "wk":
|
||||
return relativedelta(weeks=int(interval[:-2]))
|
||||
elif interval[-2:] == "mo":
|
||||
return relativedelta(months=int(interval[:-2]))
|
||||
elif interval[-1] == "y":
|
||||
return relativedelta(years=int(interval[:-1]))
|
||||
else:
|
||||
return _pd.Timedelta(interval)
|
||||
|
||||
|
||||
def is_valid_period_format(period):
    """Return True if *period* is a valid period string like '1d', '2wk', '3mo' or '1y'."""
    if period is None:
        return False

    # A positive integer (no leading zero) followed by a supported unit suffix.
    period_pattern = r"^[1-9]\d*(d|wk|mo|y)$"
    return re.match(period_pattern, period) is not None
|
||||
|
||||
|
||||
def auto_adjust(data):
|
||||
col_order = data.columns
|
||||
df = data.copy()
|
||||
@@ -949,10 +954,12 @@ def dynamic_docstring(placeholders: dict):
|
||||
return func
|
||||
return decorator
|
||||
|
||||
def _generate_table_configurations() -> str:
|
||||
def _generate_table_configurations(title = None) -> str:
|
||||
import textwrap
|
||||
table = textwrap.dedent("""
|
||||
.. list-table:: Permitted Keys/Values
|
||||
if title is None:
|
||||
title = "Permitted Keys/Values"
|
||||
table = textwrap.dedent(f"""
|
||||
.. list-table:: {title}
|
||||
:widths: 25 75
|
||||
:header-rows: 1
|
||||
|
||||
@@ -962,34 +969,134 @@ def _generate_table_configurations() -> str:
|
||||
|
||||
return table
|
||||
|
||||
def generate_list_table_from_dict(data: dict, bullets: bool=True, title: str=None) -> str:
    """
    Generate an RST list-table for a docstring showing permitted keys/values.

    Args:
        data: Mapping of key -> iterable of permitted values.
        bullets: When True, value lists containing a long value are rendered
            as one sub-bullet per value; otherwise values are joined inline.
        title: Optional table caption (passed to _generate_table_configurations).

    Returns:
        The table as an RST-formatted string.
    """
    table = _generate_table_configurations(title)
    for k in sorted(data.keys()):
        values = data[k]
        table += ' '*3 + f"* - {k}\n"
        # Long values get one bullet each; short ones are joined on one line.
        lengths = [len(str(v)) for v in values]
        if bullets and max(lengths) > 5:
            table += ' '*5 + "-\n"
            for value in sorted(values):
                table += ' '*7 + f"- {value}\n"
        else:
            value_str = ', '.join(sorted(values))
            table += ' '*5 + f"- {value_str}\n"
    return table
|
||||
|
||||
def generate_list_table_from_dict_of_dict(data: dict, bullets: bool=True) -> str:
|
||||
# def generate_list_table_from_dict_of_dict(data: dict, bullets: bool=True, title: str=None) -> str:
|
||||
# """
|
||||
# Generate a list-table for the docstring showing permitted keys/values.
|
||||
# """
|
||||
# table = _generate_table_configurations(title)
|
||||
# for k in sorted(data.keys()):
|
||||
# values = data[k]
|
||||
# table += ' '*3 + f"* - {k}\n"
|
||||
# if bullets:
|
||||
# table += ' '*5 + "-\n"
|
||||
# for value in sorted(values):
|
||||
# table += ' '*7 + f"- {value}\n"
|
||||
# else:
|
||||
# table += ' '*5 + f"- {values}\n"
|
||||
# return table
|
||||
|
||||
|
||||
def generate_list_table_from_dict_universal(data: dict, bullets: bool=True, title: str=None, concat_keys=[]) -> str:
    """
    Generate an RST list-table for a docstring showing permitted keys/values.

    Handles both flat value lists and one level of nested dicts. For nested
    dicts each sub-key is rendered on its own line; keys listed in
    ``concat_keys`` have their short sub-entries concatenated onto shared
    lines (up to ~40 characters) instead.

    Args:
        data: Mapping of key -> (iterable of values, or dict of sub-key -> values).
        bullets: When True, render nested/long entries as sub-bullets.
        title: Optional table caption (passed to _generate_table_configurations).
        concat_keys: Keys whose nested entries are packed onto shared lines.
            NOTE: mutable default is only ever read here, never mutated.

    Returns:
        The table as an RST-formatted string.
    """
    table = _generate_table_configurations(title)
    for k in data.keys():
        values = data[k]

        table += ' '*3 + f"* - {k}\n"
        if isinstance(values, dict):
            table_add = ''

            concat_short_lines = k in concat_keys

            if bullets:
                k_keys = sorted(list(values.keys()))
                current_line = ''
                block_format = 'query' in k_keys
                for i in range(len(k_keys)):
                    k2 = k_keys[i]
                    k2_values = values[k2]
                    k2_values_str = None
                    if isinstance(k2_values, set):
                        k2_values = list(k2_values)
                    elif isinstance(k2_values, dict) and len(k2_values) == 0:
                        k2_values = []
                    if isinstance(k2_values, list):
                        k2_values = sorted(k2_values)
                        all_scalar = all(isinstance(k2v, (int, float, str)) for k2v in k2_values)
                        if all_scalar:
                            # Strip brackets/quotes so scalars render as "a, b, c".
                            k2_values_str = _re.sub(r"[{}\[\]']", "", str(k2_values))

                    if k2_values_str is None:
                        k2_values_str = str(k2_values)

                    if len(current_line) > 0 and (len(current_line) + len(k2_values_str) > 40):
                        # new line
                        table_add += current_line + '\n'
                        current_line = ''

                    if concat_short_lines:
                        if current_line == '':
                            current_line += ' '*5
                            if i == 0:
                                # Only add dash to first
                                current_line += "- "
                            else:
                                current_line += "  "
                            # Don't draw bullet points:
                            current_line += '| '
                        else:
                            current_line += '. '
                        current_line += f"{k2}: " + k2_values_str
                    else:
                        table_add += ' '*5
                        if i == 0:
                            # Only add dash to first
                            table_add += "- "
                        else:
                            table_add += "  "

                        if '\n' in k2_values_str:
                            # Block format multiple lines
                            table_add += '| ' + f"{k2}: " + "\n"
                            k2_values_str_lines = k2_values_str.split('\n')
                            for j in range(len(k2_values_str_lines)):
                                line = k2_values_str_lines[j]
                                table_add += ' '*7 + '|' + ' '*5 + line
                                if j < len(k2_values_str_lines)-1:
                                    table_add += "\n"
                        else:
                            if block_format:
                                table_add += '| '
                            else:
                                table_add += '* '
                            table_add += f"{k2}: " + k2_values_str

                        table_add += "\n"
                if current_line != '':
                    table_add += current_line + '\n'
            else:
                table_add += ' '*5 + f"- {values}\n"

            table += table_add

        else:
            # Flat value list: same rendering as generate_list_table_from_dict.
            lengths = [len(str(v)) for v in values]
            if bullets and max(lengths) > 5:
                table += ' '*5 + "-\n"
                for value in sorted(values):
                    table += ' '*7 + f"- {value}\n"
            else:
                value_str = ', '.join(sorted(values))
                table += ' '*5 + f"- {value_str}\n"

    return table
|
||||
|
||||
@@ -1 +1 @@
|
||||
version = "0.2.50"
|
||||
version = "0.2.53"
|
||||
|
||||
Reference in New Issue
Block a user